ext | sha | content |
---|---|---|
py | 1a3bc11e261f6bc3c76533a9f65e53bbbef51dfc | def areaCircle(a):
    # area of a circle of radius a (pi approximated as 3.14)
    area1 = 3.14*a*a
    return area1
def areaRectangle(a, b):
    # area of an a-by-b rectangle
    area1 = a*b
    return area1
def areaCylinder(a, b):
    # 3.14 * a^2 * b, with radius a and height b
    area = 3.14*a*a*b
    return area
def rename():
    # read a replacement value from the user and return it
    a = input("Enter new data")
    a1 = a
    return a1
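
# Illustrative usage (an assumption, not part of the original file): quick sanity
# checks for the helpers above, using the same 3.14 approximation of pi.
if __name__ == "__main__":
    print(areaCircle(2))        # 3.14 * 2 * 2 = 12.56
    print(areaRectangle(3, 4))  # 3 * 4 = 12
    print(areaCylinder(2, 5))   # 3.14 * 2 * 2 * 5 = 62.8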
|
py | 1a3bc1874fcead1077b0458e14e5a84fb7129b67 | """
This module contains the panel API.
"""
import logging
from pyqode.core.api.mode import Mode
from pyqode.qt import QtWidgets, QtGui
def _logger():
""" Returns module's logger """
return logging.getLogger(__name__)
class Panel(QtWidgets.QWidget, Mode):
"""
Base class for editor panels.
A panel is a mode and a QWidget.
.. note:: Use enabled to disable panel actions and setVisible to change the
visibility of the panel.
"""
class Position(object):
"""
Enumerates the possible panel positions
"""
#: Top margin
TOP = 0
#: Left margin
LEFT = 1
#: Right margin
RIGHT = 2
#: Bottom margin
BOTTOM = 3
@classmethod
def iterable(cls):
""" Returns possible positions as an iterable (list) """
return [cls.TOP, cls.LEFT, cls.RIGHT, cls.BOTTOM]
@property
def scrollable(self):
"""
A scrollable panel will follow the editor's scroll-bars. Left and right
panels follow the vertical scrollbar. Top and bottom panels follow the
horizontal scrollbar.
:type: bool
"""
return self._scrollable
@scrollable.setter
def scrollable(self, value):
self._scrollable = value
def __init__(self, dynamic=False):
Mode.__init__(self)
QtWidgets.QWidget.__init__(self)
#: Specifies whether the panel is dynamic. A dynamic panel is a panel
#: that will be shown/hidden depending on the context.
#: Dynamic panel should not appear in any GUI menu (e.g. no display
#: in the panels menu of the notepad example).
self.dynamic = dynamic
#: Panel order into the zone it is installed to. This value is
#: automatically set when installing the panel but it can be changed
#: later (negative values can also be used).
self.order_in_zone = -1
self._scrollable = False
self._background_brush = None
self._foreground_pen = None
#: Position in the editor (top, left, right, bottom)
self.position = -1
def on_install(self, editor):
"""
Extends :meth:`pyqode.core.api.Mode.on_install` method to set the
editor instance as the parent widget.
.. warning:: Don't forget to call **super** if you override this
method!
:param editor: editor instance
:type editor: pyqode.core.api.CodeEdit
"""
Mode.on_install(self, editor)
self.setParent(editor)
self.setPalette(QtWidgets.QApplication.instance().palette())
self.setFont(QtWidgets.QApplication.instance().font())
self.editor.panels.refresh()
self._background_brush = QtGui.QBrush(QtGui.QColor(
self.palette().window().color()))
self._foreground_pen = QtGui.QPen(QtGui.QColor(
self.palette().windowText().color()))
def paintEvent(self, event):
# Fills the panel background using QPalette
if self.isVisible():
# fill background
self._background_brush = QtGui.QBrush(QtGui.QColor(
self.palette().window().color()))
self._foreground_pen = QtGui.QPen(QtGui.QColor(
self.palette().windowText().color()))
painter = QtGui.QPainter(self)
painter.fillRect(event.rect(), self._background_brush)
def setVisible(self, visible):
"""
        Shows/hides the panel.
        Automatically calls CodeEdit.refresh_panels.
:param visible: Visible state
"""
_logger().log(5, '%s visibility changed', self.name)
super(Panel, self).setVisible(visible)
if self.editor:
self.editor.panels.refresh()
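
# Illustrative sketch (an assumption, not part of pyqode itself): a minimal Panel
# subclass; the name MyMarginPanel is hypothetical.
class MyMarginPanel(Panel):
    """Example panel that relies entirely on the base-class painting."""
    def __init__(self):
        super(MyMarginPanel, self).__init__(dynamic=False)
        # left/right panels follow the vertical scrollbar when scrollable is True
        self.scrollable = True

# Typical installation (assumed panels-manager API):
#   editor.panels.append(MyMarginPanel(), Panel.Position.LEFT)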
|
py | 1a3bc3e0ddc0fe75e0a5e80517420be52a102881 | # This sample tests the semantic analyzer's handling of
# try/except/raise statements
def func1():
try:
pass
except:
raise
# This should generate an error because it's
# a "naked" raise statement.
raise
def foo(x, y) -> bool:
try:
z = x / y
except Exception as e:
return False
except:
raise Exception()
else:
return True
# This should not generate an error
# because this code is unreachable.
return 'hello'
|
py | 1a3bc5701907e05ac5b07c15a40e457301c455af | import numpy as np
from matplotlib import pyplot as plt
import matplotlib
def main():
fig, ax = plt.subplots(1,1)
sizes = list()
f = open('_cellsize.dat', 'r')
max_nodes = f.readline()
for line in f.readlines():
sizes.append(int(line.strip()))
f.close()
ax.hist(sizes)
ax.axvline(float(max_nodes))
plt.savefig('cell-size-histogram.png', dpi = 300)
if __name__ == '__main__':
main()
|
py | 1a3bc6597ca89cabec762c6c4e3d68d4b5a4a24f | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Talk to an impalad through beeswax.
# Usage:
# * impalad is a string with the host and port of the impalad
# with which the connection should be established.
# The format is "<hostname>:<port>"
# * query_string is the query to be executed, as a string.
# client = ImpalaBeeswaxClient(impalad)
# client.connect()
# result = client.execute(query_string)
# where result is an object of the class ImpalaBeeswaxResult.
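#
# A slightly fuller sketch of the flow described above (illustrative only; the
# host:port and the query text are made-up values):
#
#   client = ImpalaBeeswaxClient("impalad-host:21000")
#   client.connect()
#   result = client.execute("select count(*) from some_table")
#   print(result.get_data())
#   client.close_connection()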
import logging
import time
import shlex
import getpass
import re
from beeswaxd import BeeswaxService
from beeswaxd.BeeswaxService import QueryState
from datetime import datetime
try:
# If Exec Summary is not implemented in Impala, this cannot be imported
from ExecStats.ttypes import TExecStats
except ImportError:
pass
from ImpalaService import ImpalaService
from tests.util.thrift_util import create_transport
from thrift.transport.TTransport import TTransportException
from thrift.protocol import TBinaryProtocol
from thrift.Thrift import TApplicationException
LOG = logging.getLogger('impala_beeswax')
# Custom exception wrapper.
# All exceptions coming from thrift/beeswax etc. go through this wrapper.
# __str__ preserves the exception type.
# TODO: Add the ability to print some of the stack.
class ImpalaBeeswaxException(Exception):
__name__ = "ImpalaBeeswaxException"
def __init__(self, message, inner_exception):
self.__message = message
self.inner_exception = inner_exception
def __str__(self):
return "%s:\n %s" % (self.__name__, self.__message)
class ImpalaBeeswaxResult(object):
def __init__(self, **kwargs):
self.query = kwargs.get('query', None)
self.success = kwargs.get('success', False)
# Insert returns an int, convert into list to have a uniform data type.
# TODO: We should revisit this if we have more datatypes to deal with.
self.data = kwargs.get('data', None)
if not isinstance(self.data, list):
self.data = str(self.data)
self.data = [self.data]
self.log = None
self.time_taken = kwargs.get('time_taken', 0)
self.summary = kwargs.get('summary', str())
self.schema = kwargs.get('schema', None)
self.runtime_profile = kwargs.get('runtime_profile', str())
self.exec_summary = kwargs.get('exec_summary', None)
def get_data(self):
return self.__format_data()
def __format_data(self):
if self.data:
return '\n'.join(self.data)
return ''
def __str__(self):
message = ('Summary: %s\n'
'Success: %s\n'
'Took: %s(s)\n'
'Data:\n%s\n'
% (self.summary, self.success, self.time_taken,
self.__format_data())
)
return message
# Interface to beeswax. Responsible for executing queries, fetching results.
class ImpalaBeeswaxClient(object):
# Regex applied to all tokens of a query to detect the query type.
INSERT_REGEX = re.compile("^insert$", re.I)
def __init__(self, impalad, use_kerberos=False, user=None, password=None,
use_ssl=False):
self.connected = False
self.impalad = impalad
self.imp_service = None
self.transport = None
self.use_kerberos = use_kerberos
self.use_ssl = use_ssl
self.user, self.password = user, password
self.use_ldap = (self.user is not None)
self.__query_options = {}
self.query_states = QueryState._NAMES_TO_VALUES
def __options_to_string_list(self):
return ["%s=%s" % (k,v) for (k,v) in self.__query_options.iteritems()]
def get_query_options(self):
return self.__query_options
def set_query_option(self, name, value):
self.__query_options[name.upper()] = value
def set_query_options(self, query_option_dict):
if query_option_dict is None:
raise ValueError, 'Cannot pass None value for query options'
self.clear_query_options()
for name, value in query_option_dict.iteritems():
self.set_query_option(name, value)
def get_query_option(self, name):
return self.__query_options.get(name.upper())
def clear_query_options(self):
self.__query_options.clear()
def connect(self):
"""Connect to impalad specified in intializing this object
Raises an exception if the connection is unsuccesful.
"""
try:
self.impalad = self.impalad.split(':')
self.transport = self.__get_transport()
self.transport.open()
protocol = TBinaryProtocol.TBinaryProtocol(self.transport)
self.imp_service = ImpalaService.Client(protocol)
self.connected = True
except Exception, e:
raise ImpalaBeeswaxException(self.__build_error_message(e), e)
def close_connection(self):
"""Close the transport if it's still open"""
if self.transport:
self.transport.close()
self.connected = False
def __get_transport(self):
"""Creates the proper transport type based environment (secure vs unsecure)"""
trans_type = 'buffered'
if self.use_kerberos:
trans_type = 'kerberos'
elif self.use_ldap:
trans_type = 'plain_sasl'
return create_transport(host=self.impalad[0], port=int(self.impalad[1]),
service='impala', transport_type=trans_type, user=self.user,
password=self.password, use_ssl=self.use_ssl)
def execute(self, query_string, user=None):
"""Re-directs the query to its appropriate handler, returns ImpalaBeeswaxResult"""
# Take care of leading/trailing whitespaces.
query_string = query_string.strip()
start = time.time()
start_time = datetime.now()
handle = self.__execute_query(query_string.strip(), user=user)
if self.__get_query_type(query_string) == 'insert':
# DML queries are finished by this point.
time_taken = time.time() - start
# fetch_results() will close the query after which there is no guarantee that
# profile and log will be available so fetch them first.
runtime_profile = self.get_runtime_profile(handle)
exec_summary = self.get_exec_summary_and_parse(handle)
log = self.get_log(handle.log_context)
result = self.fetch_results(query_string, handle)
result.time_taken, result.start_time, result.runtime_profile, result.log = \
time_taken, start_time, runtime_profile, log
result.exec_summary = exec_summary
else:
# For SELECT queries, execution might still be ongoing. fetch_results() will block
# until the query is completed.
result = self.fetch_results(query_string, handle)
result.time_taken = time.time() - start
result.start_time = start_time
result.exec_summary = self.get_exec_summary_and_parse(handle)
result.log = self.get_log(handle.log_context)
result.runtime_profile = self.get_runtime_profile(handle)
self.close_query(handle)
return result
def get_exec_summary(self, handle):
return self.__do_rpc(lambda: self.imp_service.GetExecSummary(handle))
def get_exec_summary_and_parse(self, handle):
"""Calls GetExecSummary() for the last query handle, parses it and returns a summary
table. Returns None in case of an error or an empty result"""
try:
summary = self.get_exec_summary(handle)
except ImpalaBeeswaxException:
summary = None
if summary is None or summary.nodes is None:
return None
# If exec summary is not implemented in Impala, this function returns, so we do not
# get the function __build_summary_table which requires TExecStats to be imported.
output = []
self.__build_summary_table(summary, 0, False, 0, False, output)
return output
def __build_summary_table(self, summary, idx, is_fragment_root, indent_level,
new_indent_level, output):
"""NOTE: This was taken from impala_shell.py. Changes made here must be made there as
well. TODO: This method will be a placed in a library that is shared between
impala_shell and this file. (IMPALA-5792)
Direct translation of Coordinator::PrintExecSummary() to recursively build a list
of rows of summary statistics, one per exec node
summary: the TExecSummary object that contains all the summary data
idx: the index of the node to print
is_fragment_root: true if the node to print is the root of a fragment (and therefore
feeds into an exchange)
indent_level: the number of spaces to print before writing the node's label, to give
the appearance of a tree. The 0th child of a node has the same indent_level as its
parent. All other children have an indent_level of one greater than their parent.
new_indent_level: If true, this indent level is different from the previous row's.
output: the list of rows into which to append the rows produced for this node and its
children.
Returns the index of the next exec node in summary.exec_nodes that should be
processed, used internally to this method only.
"""
attrs = ["latency_ns", "cpu_time_ns", "cardinality", "memory_used"]
# Initialise aggregate and maximum stats
agg_stats, max_stats = TExecStats(), TExecStats()
for attr in attrs:
setattr(agg_stats, attr, 0)
setattr(max_stats, attr, 0)
row = {}
node = summary.nodes[idx]
# exec_stats may not be set even if the query is FINISHED if there are fragments that
# are still executing or that were cancelled before sending a status report.
if node.exec_stats is not None:
for stats in node.exec_stats:
for attr in attrs:
val = getattr(stats, attr)
if val is not None:
setattr(agg_stats, attr, getattr(agg_stats, attr) + val)
setattr(max_stats, attr, max(getattr(max_stats, attr), val))
if len(node.exec_stats) > 0:
avg_time = agg_stats.latency_ns / len(node.exec_stats)
else:
avg_time = 0
row["num_hosts"] = len(node.exec_stats)
row["avg_time"] = avg_time
# If the node is a broadcast-receiving exchange node, the cardinality of rows produced
# is the max over all instances (which should all have received the same number of
# rows). Otherwise, the cardinality is the sum over all instances which process
# disjoint partitions.
if node.is_broadcast:
cardinality = max_stats.cardinality
else:
cardinality = agg_stats.cardinality
est_stats = node.estimated_stats
label_prefix = ""
if indent_level > 0:
label_prefix = "|"
label_prefix += " |" * (indent_level - 1)
if new_indent_level:
label_prefix += "--"
else:
label_prefix += " "
row["prefix"] = label_prefix
row["operator"] = node.label
row["max_time"] = max_stats.latency_ns
row["num_rows"] = cardinality
row["est_num_rows"] = est_stats.cardinality
row["peak_mem"] = max_stats.memory_used
row["est_peak_mem"] = est_stats.memory_used
row["detail"] = node.label_detail
output.append(row)
if summary.exch_to_sender_map is not None and idx in summary.exch_to_sender_map:
sender_idx = summary.exch_to_sender_map[idx]
# This is an exchange node, so the sender is a fragment root, and should be printed
# next.
self.__build_summary_table(summary, sender_idx, True, indent_level, False, output)
idx += 1
if node.num_children > 0:
first_child_output = []
idx = \
self.__build_summary_table(
summary, idx, False, indent_level, False, first_child_output)
for child_idx in xrange(1, node.num_children):
# All other children are indented (we only have 0, 1 or 2 children for every exec
# node at the moment)
idx = self.__build_summary_table(
summary, idx, False, indent_level + 1, True, output)
output += first_child_output
return idx
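  # Illustrative note (an assumption, not from the original file): the prefix
  # strings built above are "" at indent level 0, "|--" or "|  " at level 1, and
  # "| |--" / "| |  " at level 2, which produces the tree-like exec summary layout.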
def get_runtime_profile(self, handle):
return self.__do_rpc(lambda: self.imp_service.GetRuntimeProfile(handle))
def execute_query_async(self, query_string, user=None):
"""
Executes a query asynchronously
Issues a query and returns the query handle to the caller for processing.
"""
query = BeeswaxService.Query()
query.query = query_string
query.hadoop_user = user if user is not None else getpass.getuser()
query.configuration = self.__options_to_string_list()
handle = self.__do_rpc(lambda: self.imp_service.query(query,))
LOG.info("Started query {0}".format(handle.id))
return handle
def __execute_query(self, query_string, user=None):
"""Executes a query and waits for completion"""
handle = self.execute_query_async(query_string, user=user)
# Wait for the query to finish execution.
self.wait_for_finished(handle)
return handle
def cancel_query(self, query_id):
return self.__do_rpc(lambda: self.imp_service.Cancel(query_id))
def close_query(self, handle):
self.__do_rpc(lambda: self.imp_service.close(handle))
def wait_for_finished(self, query_handle):
"""Given a query handle, polls the coordinator waiting for the query to transition to
'FINISHED' state"""
while True:
query_state = self.get_state(query_handle)
# if the rpc succeeded, the output is the query state
if query_state == self.query_states["FINISHED"]:
break
elif query_state == self.query_states["EXCEPTION"]:
try:
error_log = self.__do_rpc(
lambda: self.imp_service.get_log(query_handle.log_context))
raise ImpalaBeeswaxException("Query aborted:" + error_log, None)
finally:
self.close_query(query_handle)
time.sleep(0.05)
def wait_for_finished_timeout(self, query_handle, timeout=10):
"""Given a query handle and a timeout, polls the coordinator waiting for the query to
transition to 'FINISHED' state till 'timeout' seconds"""
start_time = time.time()
while (time.time() - start_time < timeout):
query_state = self.get_state(query_handle)
# if the rpc succeeded, the output is the query state
if query_state == self.query_states["FINISHED"]:
return True
elif query_state == self.query_states["EXCEPTION"]:
try:
error_log = self.__do_rpc(
lambda: self.imp_service.get_log(query_handle.log_context))
raise ImpalaBeeswaxException("Query aborted:" + error_log, None)
finally:
self.close_query(query_handle)
time.sleep(0.05)
return False
def wait_for_admission_control(self, query_handle):
"""Given a query handle, polls the coordinator waiting for it to complete
admission control processing of the query"""
while True:
query_state = self.get_state(query_handle)
if query_state > self.query_states["COMPILED"]:
break
time.sleep(0.05)
def get_admission_result(self, query_handle):
"""Given a query handle, returns the admission result from the query profile"""
query_state = self.get_state(query_handle)
if query_state > self.query_states["COMPILED"]:
query_profile = self.get_runtime_profile(query_handle)
admit_result = re.search(r"Admission result: (.*)", query_profile)
if admit_result:
return admit_result.group(1)
return ""
def get_default_configuration(self):
return self.__do_rpc(lambda: self.imp_service.get_default_configuration(False))
def get_state(self, query_handle):
return self.__do_rpc(lambda: self.imp_service.get_state(query_handle))
def get_log(self, query_handle):
return self.__do_rpc(lambda: self.imp_service.get_log(query_handle))
def refresh(self):
"""Invalidate the Impalad catalog"""
return self.execute("invalidate metadata")
def refresh_table(self, db_name, table_name):
"""Refresh a specific table from the catalog"""
return self.execute("refresh %s.%s" % (db_name, table_name))
def fetch_results(self, query_string, query_handle, max_rows = -1):
"""Fetches query results given a handle and query type (insert, use, other)"""
query_type = self.__get_query_type(query_string)
if query_type == 'use':
# TODO: "use <database>" does not currently throw an error. Need to update this
# to handle the error case once that behavior has been changed.
return ImpalaBeeswaxResult(query=query_string, success=True, data=[])
# Result fetching for insert is different from other queries.
exec_result = None
if query_type == 'insert':
exec_result = self.__fetch_insert_results(query_handle)
else:
exec_result = self.__fetch_results(query_handle, max_rows)
exec_result.query = query_string
return exec_result
def __fetch_results(self, handle, max_rows = -1):
"""Handles query results, returns a ImpalaBeeswaxResult object"""
schema = self.__do_rpc(lambda: self.imp_service.get_results_metadata(handle)).schema
# The query has finished, we can fetch the results
result_rows = []
while len(result_rows) < max_rows or max_rows < 0:
fetch_rows = -1 if max_rows < 0 else max_rows - len(result_rows)
results = self.__do_rpc(lambda: self.imp_service.fetch(handle, False, fetch_rows))
result_rows.extend(results.data)
if not results.has_more:
break
# The query executed successfully and all the data was fetched.
exec_result = ImpalaBeeswaxResult(success=True, data=result_rows, schema=schema)
exec_result.summary = 'Returned %d rows' % (len(result_rows))
return exec_result
def __fetch_insert_results(self, handle):
"""Executes an insert query"""
result = self.__do_rpc(lambda: self.imp_service.CloseInsert(handle))
# The insert was successful
num_rows = sum(map(int, result.rows_modified.values()))
data = ["%s: %s" % row for row in result.rows_modified.iteritems()]
exec_result = ImpalaBeeswaxResult(success=True, data=data)
exec_result.summary = "Inserted %d rows" % (num_rows,)
return exec_result
def __get_query_type(self, query_string):
# Set posix=True and add "'" to escaped quotes
# to deal with escaped quotes in string literals
lexer = shlex.shlex(query_string.lstrip(), posix=True)
lexer.escapedquotes += "'"
tokens = list(lexer)
# Do not classify explain queries as 'insert'
if (tokens[0].lower() == "explain"):
return tokens[0].lower()
# Because the WITH clause may precede INSERT or SELECT queries,
# just checking the first token is insufficient.
if filter(self.INSERT_REGEX.match, tokens):
return "insert"
return tokens[0].lower()
def __build_error_message(self, exception):
"""Construct a meaningful exception string"""
message = str(exception)
if isinstance(exception, BeeswaxService.BeeswaxException):
message = exception.message
return 'INNER EXCEPTION: %s\n MESSAGE: %s' % (exception.__class__, message)
def __do_rpc(self, rpc):
"""Executes the RPC lambda provided with some error checking.
    Catches all the relevant exceptions and re-throws them wrapped
    in a custom exception [ImpalaBeeswaxException].
"""
if not self.connected:
raise ImpalaBeeswaxException("Not connected", None)
try:
return rpc()
except BeeswaxService.BeeswaxException, b:
raise ImpalaBeeswaxException(self.__build_error_message(b), b)
except TTransportException, e:
self.connected = False
raise ImpalaBeeswaxException(self.__build_error_message(e), e)
except TApplicationException, t:
raise ImpalaBeeswaxException(self.__build_error_message(t), t)
except Exception, u:
raise ImpalaBeeswaxException(self.__build_error_message(u), u)
|
py | 1a3bc6ec5837fb09efbccce52f56b6d7c7d4590c | from .user import User, LoginForm
from .posts import Post, PostForm |
py | 1a3bc6f611e3722fd68ba61d9dfd5943051d7370 | from face_detection import Model_face_detection
from facial_landmarks_detection import Model_landmarks
from head_pose_estimation import Model_pose
from gaze_estimation import Model_gaze
from argparse import ArgumentParser
from mouse_controller import MouseController
from input_feeder import InputFeeder
import cv2
import os
import sys
import logging as log
moveto=['up','down', 'left', 'right']
def build_argparser():
parser= ArgumentParser()
parser.add_argument("--face", required=False,help= "Face detecion model path ",default='/home/adrian-estelio/Documents/vision/intel/face-detection-retail-0005/FP32-INT8/face-detection-retail-0005')
parser.add_argument("--landmarks", required=False,help= "landmarks detection model path ", default='/home/adrian-estelio/Documents/vision/intel/landmarks-regression-retail-0009/FP32/landmarks-regression-retail-0009')
parser.add_argument("--head", required=False,help= "head pose estimation model path ",default='/home/adrian-estelio/Documents/vision/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001')
parser.add_argument("--gaze", required=False,help= "Gaze estimation model path ",default='/home/adrian-estelio/Documents/vision/intel/gaze-estimation-adas-0002/FP32/gaze-estimation-adas-0002')
parser.add_argument("--input", required=False,help="Input: image or video path or webcam (CAM) ", default='CAM')#/home/adrian-estelio/Documents/vision/Mouse_controller/resources/image.jpg')
parser.add_argument("--visual_o",required=False,help="Flag to display face: True or False", default="True")
parser.add_argument("--device",required=False,help="Device to run the inference", default="CPU")
return parser
def move(coor):
mouse= MouseController('high','fast')
if coor[0]<-0.33 and coor[1]>-0.05 and coor[1]<0.05:
log.info("Moving to %s",moveto[3])
mouse.move(1,0)
elif coor[0]>0.33 and coor[1]<0:
log.info("Moving to %s",moveto[2])
mouse.move(-1,0)
elif coor[1]>0.11 and coor[0]>-0.17:
log.info("Moving to %s",moveto[0])
mouse.move(0,1)
elif coor[0]>-0.05 and coor[1]<-0.13:
log.info("Moving to %s",moveto[1])
mouse.move(0,-1)
def infer_on_stream(args):
face_model = Model_face_detection(args.face,device=args.device)
face_model.load_model()
landmarks_model = Model_landmarks(args.landmarks,device=args.device)
landmarks_model.load_model()
head_model = Model_pose(args.head,device=args.device)
head_model.load_model()
gaze_model = Model_gaze(args.gaze,device=args.device)
gaze_model.load_model()
if args.input == 'CAM':
feeder= InputFeeder('CAM')
elif args.input.endswith('.jpg') or args.input.endswith('.bmp'):
feeder= InputFeeder('image',args.input)
else:
feeder= InputFeeder('video',args.input)
if not os.path.isfile(args.input):
log.error("Specified input file doesn't exist")
sys.exit(1)
feeder.load_data()
width = feeder.width
height = feeder.height
fps = feeder.fps
out = cv2.VideoWriter('output/out.mp4', cv2.VideoWriter_fourcc(*'avc1'), fps, (width,height),True)
feeder.open()
if not feeder.opened:
log.error("Unable to open source")
while feeder.opened:
image = feeder.next_batch()
if not feeder.opened:
break
key_pressed = cv2.waitKey(1)
frame, face = face_model.predict(image)
if len(face)>0:
_,r,l = landmarks_model.predict(face)
angles= head_model.predict(face)
vector = gaze_model.predict(r,l,angles)
move(vector)
out.write(frame)
if args.visual_o == 'True':
cv2.imshow('frame',frame)
if feeder.input_type == 'image':
cv2.imwrite('output/r.jpg',r)
cv2.imwrite('output/l.jpg',l)
cv2.imwrite('output/frame.jpg',frame)
break
if key_pressed == 27:
break
out.release()
feeder.close()
def main():
log.basicConfig(level=log.INFO)
log.info("Aplication started")
args =build_argparser().parse_args()
infer_on_stream(args)
if __name__ == '__main__':
main() |
py | 1a3bc869614177bff0e097d1122aee851c6a4c20 | from lib import rpclib
import json
import time
import re
import sys
import pickle
import platform
import os
import subprocess
import signal
from slickrpc import Proxy
from binascii import hexlify
from binascii import unhexlify
from functools import partial
from shutil import copy
operating_system = platform.system()
if operating_system != 'Win64' and operating_system != 'Windows':
import readline
def colorize(string, color):
    colors = {
        'blue': '\033[94m',
        'magenta': '\033[95m',
        # 'pink' is used by several error messages below; map it to the same ANSI
        # code as magenta so those messages are actually colorized
        'pink': '\033[95m',
        'green': '\033[92m',
        'red': '\033[91m'
    }
if color not in colors:
return string
else:
return colors[color] + string + '\033[0m'
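
# Illustrative usage (an assumption, not part of the original file):
#   print(colorize("All good!", "green"))   # printed in green
#   print(colorize("Plain text", "cyan"))   # unknown color -> returned unchanged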
def rpc_connection_tui():
    # TODO: possibly save multiple entries from successful sessions and ask the user to choose one
while True:
restore_choice = input("Do you want to use connection details from previous session? [y/n]: ")
if restore_choice == "y":
try:
with open("connection.json", "r") as file:
connection_json = json.load(file)
rpc_user = connection_json["rpc_user"]
rpc_password = connection_json["rpc_password"]
rpc_port = connection_json["rpc_port"]
rpc_connection = rpclib.rpc_connect(rpc_user, rpc_password, int(rpc_port))
except FileNotFoundError:
print(colorize("You do not have cached connection details. Please select n for connection setup", "red"))
break
elif restore_choice == "n":
rpc_user = input("Input your rpc user: ")
rpc_password = input("Input your rpc password: ")
rpc_port = input("Input your rpc port: ")
connection_details = {"rpc_user": rpc_user,
"rpc_password": rpc_password,
"rpc_port": rpc_port}
connection_json = json.dumps(connection_details)
with open("connection.json", "w+") as file:
file.write(connection_json)
rpc_connection = rpclib.rpc_connect(rpc_user, rpc_password, int(rpc_port))
break
else:
print(colorize("Please input y or n", "red"))
return rpc_connection
def def_credentials(chain):
rpcport ='';
operating_system = platform.system()
if operating_system == 'Darwin':
ac_dir = os.environ['HOME'] + '/Library/Application Support/Komodo'
elif operating_system == 'Linux':
ac_dir = os.environ['HOME'] + '/.komodo'
elif operating_system == 'Win64' or operating_system == 'Windows':
ac_dir = '%s/komodo/' % os.environ['APPDATA']
if chain == 'KMD':
coin_config_file = str(ac_dir + '/komodo.conf')
else:
coin_config_file = str(ac_dir + '/' + chain + '/' + chain + '.conf')
with open(coin_config_file, 'r') as f:
for line in f:
l = line.rstrip()
if re.search('rpcuser', l):
rpcuser = l.replace('rpcuser=', '')
elif re.search('rpcpassword', l):
rpcpassword = l.replace('rpcpassword=', '')
elif re.search('rpcport', l):
rpcport = l.replace('rpcport=', '')
if len(rpcport) == 0:
if chain == 'KMD':
rpcport = 7771
else:
print("rpcport not in conf file, exiting")
print("check "+coin_config_file)
exit(1)
return(Proxy("http://%s:%[email protected]:%d"%(rpcuser, rpcpassword, int(rpcport))))
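
# def_credentials() expects a conf file shaped like the hypothetical example below
# (values are made up); rpcport may be omitted for KMD, which defaults to 7771:
#
#   rpcuser=user123
#   rpcpassword=pass123
#   rpcport=12345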
def getinfo_tui(rpc_connection):
info_raw = rpclib.getinfo(rpc_connection)
if isinstance(info_raw, dict):
for key in info_raw:
print("{}: {}".format(key, info_raw[key]))
input("Press [Enter] to continue...")
else:
print("Error!\n")
print(info_raw)
input("\nPress [Enter] to continue...")
def token_create_tui(rpc_connection):
while True:
try:
name = input("Set your token name: ")
supply = input("Set your token supply: ")
description = input("Set your token description: ")
except KeyboardInterrupt:
break
else:
token_hex = rpclib.token_create(rpc_connection, name, supply, description)
if token_hex['result'] == "error":
print(colorize("\nSomething went wrong!\n", "pink"))
print(token_hex)
print("\n")
input("Press [Enter] to continue...")
break
else:
try:
token_txid = rpclib.sendrawtransaction(rpc_connection,
token_hex['hex'])
                except KeyError:
                    # token_hex has no 'hex' key, so token_txid was never assigned
                    print(token_hex)
                    print("Error")
                    input("Press [Enter] to continue...")
                    break
                else:
                    print(colorize("Token creation transaction broadcasted: " + token_txid, "green"))
file = open("tokens_list", "a")
file.writelines(token_txid + "\n")
file.close()
print(colorize("Entry added to tokens_list file!\n", "green"))
input("Press [Enter] to continue...")
break
def oracle_create_tui(rpc_connection):
print(colorize("\nAvailiable data types:\n", "blue"))
oracles_data_types = ["Ihh -> height, blockhash, merkleroot\ns -> <256 char string\nS -> <65536 char string\nd -> <256 binary data\nD -> <65536 binary data",
"c -> 1 byte signed little endian number, C unsigned\nt -> 2 byte signed little endian number, T unsigned",
"i -> 4 byte signed little endian number, I unsigned\nl -> 8 byte signed little endian number, L unsigned",
"h -> 32 byte hash\n"]
for oracles_type in oracles_data_types:
print(str(oracles_type))
while True:
try:
name = input("Set your oracle name: ")
description = input("Set your oracle description: ")
oracle_data_type = input("Set your oracle type (e.g. Ihh): ")
except KeyboardInterrupt:
break
else:
oracle_hex = rpclib.oracles_create(rpc_connection, name, description, oracle_data_type)
if oracle_hex['result'] == "error":
print(colorize("\nSomething went wrong!\n", "pink"))
print(oracle_hex)
print("\n")
input("Press [Enter] to continue...")
break
else:
try:
oracle_txid = rpclib.sendrawtransaction(rpc_connection, oracle_hex['hex'])
                except KeyError:
                    # oracle_hex has no 'hex' key, so oracle_txid was never assigned
                    print(oracle_hex)
                    print("Error")
                    input("Press [Enter] to continue...")
                    break
                else:
                    print(colorize("Oracle creation transaction broadcasted: " + oracle_txid, "green"))
file = open("oracles_list", "a")
file.writelines(oracle_txid + "\n")
file.close()
print(colorize("Entry added to oracles_list file!\n", "green"))
input("Press [Enter] to continue...")
break
def oracle_fund_tui(rpc_connection):
try:
print(colorize("Oracles created from this instance by TUI: \n", "blue"))
with open("oracles_list", "r") as file:
for oracle in file:
print(oracle)
print(colorize('_' * 65, "blue"))
print("\n")
except FileNotFoundError:
print("Seems like a no oracles created from this instance yet\n")
pass
while True:
try:
oracle_id = input("Input txid of oracle you want to register to: ")
except KeyboardInterrupt:
break
oracle_fund_hex = rpclib.oracles_fund(rpc_connection, oracle_id)
if oracle_fund_hex['result'] == "error":
print(colorize("\nSomething went wrong!\n", "pink"))
print(oracle_fund_hex)
print("\n")
input("Press [Enter] to continue...")
break
else:
try:
oracle_fund_txid = rpclib.sendrawtransaction(rpc_connection, oracle_fund_hex['hex'])
except KeyError:
print(oracle_fund_hex)
print("Error")
input("Press [Enter] to continue...")
break
else:
print(colorize("Oracle fund transaction broadcasted: " + oracle_fund_txid, "green"))
input("Press [Enter] to continue...")
break
def oracle_register_tui(rpc_connection):
    # TODO: now that blackjoker's new RPC call is available, grab the full oracles
    # list and print only the oracles whose owner matches this node's pubkey
try:
print(colorize("Oracles created from this instance by TUI: \n", "blue"))
with open("oracles_list", "r") as file:
for oracle in file:
print(oracle)
print(colorize('_' * 65, "blue"))
print("\n")
except FileNotFoundError:
print("Seems like a no oracles created from this instance yet\n")
pass
while True:
try:
oracle_id = input("Input txid of oracle you want to register to: ")
data_fee = input("Set publisher datafee (in satoshis): ")
except KeyboardInterrupt:
break
oracle_register_hex = rpclib.oracles_register(rpc_connection, oracle_id, data_fee)
if oracle_register_hex['result'] == "error":
print(colorize("\nSomething went wrong!\n", "pink"))
print(oracle_register_hex)
print("\n")
input("Press [Enter] to continue...")
break
else:
try:
oracle_register_txid = rpclib.sendrawtransaction(rpc_connection, oracle_register_hex['hex'])
except KeyError:
print(oracle_register_hex)
print("Error")
input("Press [Enter] to continue...")
break
else:
print(colorize("Oracle registration transaction broadcasted: " + oracle_register_txid, "green"))
input("Press [Enter] to continue...")
break
def oracle_subscription_utxogen(rpc_connection):
    # TODO: now that blackjoker's new RPC call is available, grab the full oracles
    # list and print only the oracles whose owner matches this node's pubkey
try:
print(colorize("Oracles created from this instance by TUI: \n", "blue"))
with open("oracles_list", "r") as file:
for oracle in file:
print(oracle)
print(colorize('_' * 65, "blue"))
print("\n")
except FileNotFoundError:
print("Seems like a no oracles created from this instance yet\n")
pass
while True:
try:
oracle_id = input("Input oracle ID you want to subscribe to: ")
#printout to fast copypaste publisher id
oracle_info = rpclib.oracles_info(rpc_connection, oracle_id)
publishers = 0
print(colorize("\nPublishers registered for a selected oracle: \n", "blue"))
try:
for entry in oracle_info["registered"]:
publisher = entry["publisher"]
print(publisher + "\n")
publishers = publishers + 1
print("Total publishers:{}".format(publishers))
except (KeyError, ConnectionResetError):
print(colorize("Please re-check your input. Oracle txid seems not valid.", "red"))
pass
print(colorize('_' * 65, "blue"))
print("\n")
if publishers == 0:
print(colorize("This oracle have no publishers to subscribe.\n"
"Please register as an oracle publisher first and/or wait since registration transaciton mined!", "red"))
input("Press [Enter] to continue...")
break
publisher_id = input("Input oracle publisher id you want to subscribe to: ")
data_fee = input("Input subscription fee (in COINS!): ")
utxo_num = int(input("Input how many transactions you want to broadcast: "))
except KeyboardInterrupt:
break
while utxo_num > 0:
while True:
oracle_subscription_hex = rpclib.oracles_subscribe(rpc_connection, oracle_id, publisher_id, data_fee)
oracle_subscription_txid = rpclib.sendrawtransaction(rpc_connection, oracle_subscription_hex['hex'])
mempool = rpclib.get_rawmempool(rpc_connection)
if oracle_subscription_txid in mempool:
break
else:
pass
print(colorize("Oracle subscription transaction broadcasted: " + oracle_subscription_txid, "green"))
utxo_num = utxo_num - 1
input("Press [Enter] to continue...")
break
def gateways_bind_tui(rpc_connection):
# main loop with keyboard interrupt handling
while True:
try:
while True:
try:
print(colorize("Tokens created from this instance by TUI: \n", "blue"))
with open("tokens_list", "r") as file:
for oracle in file:
print(oracle)
print(colorize('_' * 65, "blue"))
print("\n")
except FileNotFoundError:
print("Seems like a no oracles created from this instance yet\n")
pass
token_id = input("Input id of token you want to use in gw bind: ")
try:
token_name = rpclib.token_info(rpc_connection, token_id)["name"]
except KeyError:
print(colorize("Not valid tokenid. Please try again.", "red"))
input("Press [Enter] to continue...")
token_info = rpclib.token_info(rpc_connection, token_id)
print(colorize("\n{} token total supply: {}\n".format(token_id, token_info["supply"]), "blue"))
token_supply = input("Input supply for token binding: ")
try:
print(colorize("\nOracles created from this instance by TUI: \n", "blue"))
with open("oracles_list", "r") as file:
for oracle in file:
print(oracle)
print(colorize('_' * 65, "blue"))
print("\n")
except FileNotFoundError:
print("Seems like a no oracles created from this instance yet\n")
pass
oracle_id = input("Input id of oracle you want to use in gw bind: ")
try:
oracle_name = rpclib.oracles_info(rpc_connection, oracle_id)["name"]
except KeyError:
print(colorize("Not valid oracleid. Please try again.", "red"))
input("Press [Enter] to continue...")
while True:
coin_name = input("Input external coin ticker (binded oracle and token need to have same name!): ")
if token_name == oracle_name and token_name == coin_name:
break
else:
print(colorize("Token name, oracle name and external coin ticker should match!", "red"))
while True:
M = input("Input minimal amount of pubkeys needed for transaction confirmation (1 for non-multisig gw): ")
N = input("Input maximal amount of pubkeys needed for transaction confirmation (1 for non-multisig gw): ")
if (int(N) >= int(M)):
break
else:
print("Maximal amount of pubkeys should be more or equal than minimal. Please try again.")
pubkeys = []
for i in range(int(N)):
pubkeys.append(input("Input pubkey {}: ".format(i+1)))
pubtype = input("Input pubtype of external coin: ")
p2shtype = input("Input p2shtype of external coin: ")
wiftype = input("Input wiftype of external coin: ")
args = [rpc_connection, token_id, oracle_id, coin_name, token_supply, M, N]
new_args = [str(pubtype), str(p2shtype), wiftype]
args = args + pubkeys + new_args
# broadcasting block
try:
gateways_bind_hex = rpclib.gateways_bind(*args)
except Exception as e:
print(e)
input("Press [Enter] to continue...")
break
try:
gateways_bind_txid = rpclib.sendrawtransaction(rpc_connection, gateways_bind_hex["hex"])
except Exception as e:
print(e)
print(gateways_bind_hex)
input("Press [Enter] to continue...")
break
else:
print(colorize("Gateway bind transaction broadcasted: " + gateways_bind_txid, "green"))
file = open("gateways_list", "a")
file.writelines(gateways_bind_txid + "\n")
file.close()
print(colorize("Entry added to gateways_list file!\n", "green"))
input("Press [Enter] to continue...")
break
break
except KeyboardInterrupt:
break
# temporary :trollface: custom connection function solution
# to have connection to KMD daemon and cache it in separate file
def rpc_kmd_connection_tui():
while True:
restore_choice = input("Do you want to use KMD daemon connection details from previous session? [y/n]: ")
if restore_choice == "y":
try:
with open("connection_kmd.json", "r") as file:
connection_json = json.load(file)
rpc_user = connection_json["rpc_user"]
rpc_password = connection_json["rpc_password"]
rpc_port = connection_json["rpc_port"]
rpc_connection_kmd = rpclib.rpc_connect(rpc_user, rpc_password, int(rpc_port))
try:
print(rpc_connection_kmd.getinfo())
print(colorize("Successfully connected!\n", "green"))
input("Press [Enter] to continue...")
break
except Exception as e:
print(e)
print(colorize("NOT CONNECTED!\n", "red"))
input("Press [Enter] to continue...")
break
except FileNotFoundError:
print(colorize("You do not have cached KMD daemon connection details."
" Please select n for connection setup", "red"))
input("Press [Enter] to continue...")
elif restore_choice == "n":
rpc_user = input("Input your rpc user: ")
rpc_password = input("Input your rpc password: ")
rpc_port = input("Input your rpc port: ")
connection_details = {"rpc_user": rpc_user,
"rpc_password": rpc_password,
"rpc_port": rpc_port}
connection_json = json.dumps(connection_details)
with open("connection_kmd.json", "w+") as file:
file.write(connection_json)
rpc_connection_kmd = rpclib.rpc_connect(rpc_user, rpc_password, int(rpc_port))
try:
print(rpc_connection_kmd.getinfo())
print(colorize("Successfully connected!\n", "green"))
input("Press [Enter] to continue...")
break
except Exception as e:
print(e)
print(colorize("NOT CONNECTED!\n", "red"))
input("Press [Enter] to continue...")
break
else:
print(colorize("Please input y or n", "red"))
return rpc_connection_kmd
def z_sendmany_twoaddresses(rpc_connection, sendaddress, recepient1, amount1, recepient2, amount2):
str_sending_block = "[{{\"address\":\"{}\",\"amount\":{}}},{{\"address\":\"{}\",\"amount\":{}}}]".format(recepient1, amount1, recepient2, amount2)
sending_block = json.loads(str_sending_block)
operation_id = rpc_connection.z_sendmany(sendaddress,sending_block)
return operation_id
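
# Illustrative call (addresses and amounts are made up, not from the original file):
#   opid = z_sendmany_twoaddresses(rpc_connection, "RSenderAddr...", "RTokenClaimAddr...",
#                                  0.0001, "RGatewayDepositAddr...", 1.5)
#   txid = operationstatus_to_txid(rpc_connection, opid)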
def operationstatus_to_txid(rpc_connection, zstatus):
str_sending_block = "[\"{}\"]".format(zstatus)
sending_block = json.loads(str_sending_block)
operation_json = rpc_connection.z_getoperationstatus(sending_block)
operation_dump = json.dumps(operation_json)
operation_dict = json.loads(operation_dump)[0]
txid = operation_dict['result']['txid']
return txid
def gateways_send_kmd(rpc_connection):
# TODO: have to handle CTRL+C on text input
print(colorize("Please be carefull when input wallet addresses and amounts since all transactions doing in real KMD!", "pink"))
print("Your addresses with balances: ")
list_address_groupings = rpc_connection.listaddressgroupings()
for address in list_address_groupings:
print(str(address) + "\n")
sendaddress = input("Input address from which you transfer KMD: ")
recepient1 = input("Input address which belongs to pubkey which will receive tokens: ")
amount1 = 0.0001
recepient2 = input("Input gateway deposit address: ")
file = open("deposits_list", "a")
#have to show here deposit addresses for gateways created by user
amount2 = input("Input how many KMD you want to deposit on this gateway: ")
operation = z_sendmany_twoaddresses(rpc_connection, sendaddress, recepient1, amount1, recepient2, amount2)
print("Operation proceed! " + str(operation) + " Let's wait 2 seconds to get txid")
# trying to avoid pending status of operation
time.sleep(2)
txid = operationstatus_to_txid(rpc_connection, operation)
file.writelines(txid + "\n")
file.close()
print(colorize("KMD Transaction ID: " + str(txid) + " Entry added to deposits_list file", "green"))
input("Press [Enter] to continue...")
def gateways_deposit_tui(rpc_connection_assetchain, rpc_connection_komodo):
while True:
bind_txid = input("Input your gateway bind txid: ")
coin_name = input("Input your external coin ticker (e.g. KMD): ")
coin_txid = input("Input your deposit txid: ")
dest_pub = input("Input pubkey which claim deposit: ")
amount = input("Input amount of your deposit: ")
height = rpc_connection_komodo.getrawtransaction(coin_txid, 1)["height"]
deposit_hex = rpc_connection_komodo.getrawtransaction(coin_txid, 1)["hex"]
claim_vout = "0"
proof_sending_block = "[\"{}\"]".format(coin_txid)
proof = rpc_connection_komodo.gettxoutproof(json.loads(proof_sending_block))
deposit_hex = rpclib.gateways_deposit(rpc_connection_assetchain, bind_txid, str(height), coin_name, \
coin_txid, claim_vout, deposit_hex, proof, dest_pub, amount)
print(deposit_hex)
deposit_txid = rpclib.sendrawtransaction(rpc_connection_assetchain, deposit_hex["hex"])
print("Done! Gateways deposit txid is: " + deposit_txid + " Please not forget to claim your deposit!")
input("Press [Enter] to continue...")
break
def gateways_claim_tui(rpc_connection):
while True:
bind_txid = input("Input your gateway bind txid: ")
coin_name = input("Input your external coin ticker (e.g. KMD): ")
deposit_txid = input("Input your gatewaysdeposit txid: ")
dest_pub = input("Input pubkey which claim deposit: ")
amount = input("Input amount of your deposit: ")
claim_hex = rpclib.gateways_claim(rpc_connection, bind_txid, coin_name, deposit_txid, dest_pub, amount)
try:
claim_txid = rpclib.sendrawtransaction(rpc_connection, claim_hex["hex"])
except Exception as e:
print(e)
print(claim_hex)
input("Press [Enter] to continue...")
break
else:
print("Succesfully claimed! Claim transaction id: " + claim_txid)
input("Press [Enter] to continue...")
break
def gateways_withdrawal_tui(rpc_connection):
while True:
bind_txid = input("Input your gateway bind txid: ")
coin_name = input("Input your external coin ticker (e.g. KMD): ")
withdraw_pub = input("Input pubkey to which you want to withdraw: ")
amount = input("Input amount of withdrawal: ")
withdraw_hex = rpclib.gateways_withdraw(rpc_connection, bind_txid, coin_name, withdraw_pub, amount)
withdraw_txid = rpclib.sendrawtransaction(rpc_connection, withdraw_hex["hex"])
print(withdraw_txid)
input("Press [Enter] to continue...")
break
def print_mempool(rpc_connection):
while True:
mempool = rpclib.get_rawmempool(rpc_connection)
tx_counter = 0
print(colorize("Transactions in mempool: \n", "magenta"))
for transaction in mempool:
print(transaction + "\n")
tx_counter = tx_counter + 1
print("Total: " + str(tx_counter) + " transactions\n")
print("R + Enter to refresh list. E + Enter to exit menu." + "\n")
is_refresh = input("Choose your destiny: ")
if is_refresh == "R":
print("\n")
pass
elif is_refresh == "E":
print("\n")
break
else:
print("\nPlease choose R or E\n")
def print_tokens_list(rpc_connection):
# TODO: have to print it with tokeninfo to have sense
pass
def print_tokens_balances(rpc_connection):
# TODO: checking tokenbalance for each token from tokenlist and reflect non zero ones
pass
def hexdump(filename, chunk_size=1<<15):
data = ""
#add_spaces = partial(re.compile(b'(..)').sub, br'\1 ')
#write = getattr(sys.stdout, 'buffer', sys.stdout).write
with open(filename, 'rb') as file:
for chunk in iter(partial(file.read, chunk_size), b''):
data += str(hexlify(chunk).decode())
return data
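
# Illustrative usage (an assumption): hexdump("example.bin") returns the whole file
# as one hex string, e.g. the bytes b"\x00\xff" become "00ff"; chunk_size only
# limits how much is read per iteration, not the output.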
def convert_file_oracle_d(rpc_connection):
while True:
path = input("Input path to file you want to upload to oracle: ")
try:
hex_data = (hexdump(path, 1))[2:]
except Exception as e:
print(e)
print("Seems something goes wrong (I guess you've specified wrong path)!")
input("Press [Enter] to continue...")
break
else:
length = round(len(hex_data) / 2)
if length > 256:
print("Length: " + str(length) + " bytes")
print("File is too big for this app")
input("Press [Enter] to continue...")
break
else:
hex_length = format(length, '#04x')[2:]
data_for_oracle = str(hex_length) + hex_data
print("File hex representation: \n")
print(data_for_oracle + "\n")
print("Length: " + str(length) + " bytes")
print("File converted!")
new_oracle_hex = rpclib.oracles_create(rpc_connection, "tonyconvert", path, "d")
new_oracle_txid = rpclib.sendrawtransaction(rpc_connection, new_oracle_hex["hex"])
time.sleep(0.5)
oracle_register_hex = rpclib.oracles_register(rpc_connection, new_oracle_txid, "10000")
oracle_register_txid = rpclib.sendrawtransaction(rpc_connection, oracle_register_hex["hex"])
time.sleep(0.5)
oracle_subscribe_hex = rpclib.oracles_subscribe(rpc_connection, new_oracle_txid, rpclib.getinfo(rpc_connection)["pubkey"], "0.001")
oracle_subscribe_txid = rpclib.sendrawtransaction(rpc_connection, oracle_subscribe_hex["hex"])
time.sleep(0.5)
while True:
mempool = rpclib.get_rawmempool(rpc_connection)
if oracle_subscribe_txid in mempool:
print("Waiting for oracle subscribtion tx to be mined" + "\n")
time.sleep(6)
pass
else:
break
oracles_data_hex = rpclib.oracles_data(rpc_connection, new_oracle_txid, data_for_oracle)
try:
oracle_data_txid = rpclib.sendrawtransaction(rpc_connection, oracles_data_hex["hex"])
except Exception as e:
print(oracles_data_hex)
print(e)
print("Oracle created: " + str(new_oracle_txid))
print("Data published: " + str(oracle_data_txid))
input("Press [Enter] to continue...")
break
def convert_file_oracle_D(rpc_connection):
while True:
path = input("Input path to file you want to upload to oracle: ")
try:
hex_data = (hexdump(path, 1))
except Exception as e:
print(e)
print("Seems something goes wrong (I guess you've specified wrong path)!")
input("Press [Enter] to continue...")
break
else:
length = round(len(hex_data) / 2)
# if length > 800000:
# print("Too big file size to upload for this version of program. Maximum size is 800KB.")
# input("Press [Enter] to continue...")
# break
if length > 8000:
# if file is more than 8000 bytes - slicing it to <= 8000 bytes chunks (16000 symbols = 8000 bytes)
data = [hex_data[i:i + 16000] for i in range(0, len(hex_data), 16000)]
chunks_amount = len(data)
# TODO: have to create oracle but subscribe this time chunks amount times to send whole file in same block
# TODO: 2 - on some point file will not fit block - have to find this point
# TODO: 3 way how I want to implement it first will keep whole file in RAM - have to implement some way to stream chunks to oracle before whole file readed
# TODO: have to "optimise" registration fee
# Maybe just check size first by something like a du ?
print("Length: " + str(length) + " bytes.\n Chunks amount: " + str(chunks_amount))
new_oracle_hex = rpclib.oracles_create(rpc_connection, "tonyconvert_" + str(chunks_amount), path, "D")
new_oracle_txid = rpclib.sendrawtransaction(rpc_connection, new_oracle_hex["hex"])
time.sleep(0.5)
oracle_register_hex = rpclib.oracles_register(rpc_connection, new_oracle_txid, "10000")
oracle_register_txid = rpclib.sendrawtransaction(rpc_connection, oracle_register_hex["hex"])
# subscribe chunks_amount + 1 times, but lets limit our broadcasting 100 tx per block (800KB/block)
if chunks_amount > 100:
utxo_num = 101
else:
utxo_num = chunks_amount
while utxo_num > 0:
while True:
oracle_subscription_hex = rpclib.oracles_subscribe(rpc_connection, new_oracle_txid, rpclib.getinfo(rpc_connection)["pubkey"], "0.001")
oracle_subscription_txid = rpclib.sendrawtransaction(rpc_connection,
oracle_subscription_hex['hex'])
mempool = rpclib.get_rawmempool(rpc_connection)
if oracle_subscription_txid in mempool:
break
else:
pass
print(colorize("Oracle subscription transaction broadcasted: " + oracle_subscription_txid, "green"))
utxo_num = utxo_num - 1
                # waiting for the last broadcasted subscription transaction to be mined, to be sure the funds are on the oracle balance
while True:
mempool = rpclib.get_rawmempool(rpc_connection)
if oracle_subscription_txid in mempool:
print("Waiting for oracle subscribtion tx to be mined" + "\n")
time.sleep(6)
pass
else:
break
print("Oracle preparation is finished. Oracle txid: " + new_oracle_txid)
# can publish data now
counter = 0
for chunk in data:
hex_length_bigendian = format(round(len(chunk) / 2), '#06x')[2:]
# swap to get little endian length
a = hex_length_bigendian[2:]
b = hex_length_bigendian[:2]
hex_length = a + b
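                    # e.g. a full 8000-byte chunk: '#06x' gives big-endian '1f40',
                    # which the swap above turns into little-endian '401f'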
data_for_oracle = str(hex_length) + chunk
counter = counter + 1
# print("Chunk number: " + str(counter) + "\n")
# print(data_for_oracle)
try:
oracles_data_hex = rpclib.oracles_data(rpc_connection, new_oracle_txid, data_for_oracle)
except Exception as e:
print(data_for_oracle)
print(e)
input("Press [Enter] to continue...")
break
# on broadcasting ensuring that previous one reached mempool before blast next one
while True:
mempool = rpclib.get_rawmempool(rpc_connection)
oracle_data_txid = rpclib.sendrawtransaction(rpc_connection, oracles_data_hex["hex"])
#time.sleep(0.1)
if oracle_data_txid in mempool:
break
else:
pass
# blasting not more than 100 at once (so maximum capacity per block can be changed here)
# but keep in mind that registration UTXOs amount needs to be changed too !
if counter % 100 == 0 and chunks_amount > 100:
while True:
mempool = rpclib.get_rawmempool(rpc_connection)
if oracle_data_txid in mempool:
print("Waiting for previous data chunks to be mined before send new ones" + "\n")
print("Sent " + str(counter) + " chunks from " + str(chunks_amount))
time.sleep(6)
pass
else:
break
print("Last baton: " + oracle_data_txid)
input("Press [Enter] to continue...")
break
# if file suits single oraclesdata just broadcasting it straight without any slicing
else:
hex_length_bigendian = format(length, '#06x')[2:]
# swap to get little endian length
a = hex_length_bigendian[2:]
b = hex_length_bigendian[:2]
hex_length = a + b
data_for_oracle = str(hex_length) + hex_data
print("File hex representation: \n")
print(data_for_oracle + "\n")
print("Length: " + str(length) + " bytes")
print("File converted!")
new_oracle_hex = rpclib.oracles_create(rpc_connection, "tonyconvert_" + "1", path, "D")
new_oracle_txid = rpclib.sendrawtransaction(rpc_connection, new_oracle_hex["hex"])
time.sleep(0.5)
oracle_register_hex = rpclib.oracles_register(rpc_connection, new_oracle_txid, "10000")
oracle_register_txid = rpclib.sendrawtransaction(rpc_connection, oracle_register_hex["hex"])
time.sleep(0.5)
oracle_subscribe_hex = rpclib.oracles_subscribe(rpc_connection, new_oracle_txid, rpclib.getinfo(rpc_connection)["pubkey"], "0.001")
oracle_subscribe_txid = rpclib.sendrawtransaction(rpc_connection, oracle_subscribe_hex["hex"])
time.sleep(0.5)
while True:
mempool = rpclib.get_rawmempool(rpc_connection)
if oracle_subscribe_txid in mempool:
print("Waiting for oracle subscribtion tx to be mined" + "\n")
time.sleep(6)
pass
else:
break
oracles_data_hex = rpclib.oracles_data(rpc_connection, new_oracle_txid, data_for_oracle)
try:
oracle_data_txid = rpclib.sendrawtransaction(rpc_connection, oracles_data_hex["hex"])
except Exception as e:
print(oracles_data_hex)
print(e)
input("Press [Enter] to continue...")
break
else:
print("Oracle created: " + str(new_oracle_txid))
print("Data published: " + str(oracle_data_txid))
input("Press [Enter] to continue...")
break
def get_files_list(rpc_connection):
start_time = time.time()
oracles_list = rpclib.oracles_list(rpc_connection)
files_list = []
for oracle_txid in oracles_list:
oraclesinfo_result = rpclib.oracles_info(rpc_connection, oracle_txid)
description = oraclesinfo_result['description']
name = oraclesinfo_result['name']
if name[0:12] == 'tonyconvert_':
new_file = '[' + name + ': ' + description + ']: ' + oracle_txid
files_list.append(new_file)
print("--- %s seconds ---" % (time.time() - start_time))
return files_list
def display_files_list(rpc_connection):
print("Scanning oracles. Please wait...")
list_to_display = get_files_list(rpc_connection)
while True:
for file in list_to_display:
print(file + "\n")
input("Press [Enter] to continue...")
break
def files_downloader(rpc_connection):
while True:
display_files_list(rpc_connection)
print("\n")
oracle_id = input("Input oracle ID you want to download file from: ")
output_path = input("Input output path for downloaded file (name included) e.g. /home/test.txt: ")
oracle_info = rpclib.oracles_info(rpc_connection, oracle_id)
name = oracle_info['name']
latest_baton_txid = oracle_info['registered'][0]['batontxid']
if name[0:12] == 'tonyconvert_':
# downloading process here
chunks_amount = int(name[12:])
data = rpclib.oracles_samples(rpc_connection, oracle_id, latest_baton_txid, str(chunks_amount))["samples"]
for chunk in reversed(data):
with open(output_path, 'ab+') as file:
file.write(unhexlify(chunk[0]))
print("I hope that file saved to " + output_path + "\n")
input("Press [Enter] to continue...")
break
else:
print("I cant recognize file inside this oracle. I'm very sorry, boss.")
input("Press [Enter] to continue...")
break
def marmara_receive_tui(rpc_connection):
while True:
        issuer_pubkey = input("Input pubkey of the person you want to receive MARMARA from: ")
        issuance_sum = input("Input amount of MARMARA you want to receive: ")
        blocks_valid = input("Input number of blocks until the cheque matures: ")
try:
marmara_receive_txinfo = rpc_connection.marmarareceive(issuer_pubkey, issuance_sum, "MARMARA", blocks_valid)
marmara_receive_txid = rpc_connection.sendrawtransaction(marmara_receive_txinfo["hex"])
print("Marmara receive txid broadcasted: " + marmara_receive_txid + "\n")
print(json.dumps(marmara_receive_txinfo, indent=4, sort_keys=True) + "\n")
with open("receive_txids.txt", 'a+') as file:
file.write(marmara_receive_txid + "\n")
file.write(json.dumps(marmara_receive_txinfo, indent=4, sort_keys=True) + "\n")
print("Transaction id is saved to receive_txids.txt file.")
input("Press [Enter] to continue...")
break
except Exception as e:
print(marmara_receive_txinfo)
print(e)
print("Something went wrong. Please check your input")
def marmara_issue_tui(rpc_connection):
while True:
receiver_pubkey = input("Input pubkey of person who do you want to issue MARMARA: ")
issuance_sum = input("Input amount of MARMARA you want to issue: ")
maturing_block = input("Input number of block on which issuance mature: ")
approval_txid = input("Input receiving request transaction id: ")
try:
marmara_issue_txinfo = rpc_connection.marmaraissue(receiver_pubkey, issuance_sum, "MARMARA", maturing_block, approval_txid)
marmara_issue_txid = rpc_connection.sendrawtransaction(marmara_issue_txinfo["hex"])
print("Marmara issuance txid broadcasted: " + marmara_issue_txid + "\n")
print(json.dumps(marmara_issue_txinfo, indent=4, sort_keys=True) + "\n")
with open("issue_txids.txt", "a+") as file:
file.write(marmara_issue_txid + "\n")
file.write(json.dumps(marmara_issue_txinfo, indent=4, sort_keys=True) + "\n")
print("Transaction id is saved to issue_txids.txt file.")
input("Press [Enter] to continue...")
break
except Exception as e:
print(marmara_issue_txinfo)
print(e)
print("Something went wrong. Please check your input")
def marmara_creditloop_tui(rpc_connection):
while True:
loop_txid = input("Input transaction ID of credit loop you want to get info about: ")
try:
marmara_creditloop_info = rpc_connection.marmaracreditloop(loop_txid)
print(json.dumps(marmara_creditloop_info, indent=4, sort_keys=True) + "\n")
input("Press [Enter] to continue...")
break
except Exception as e:
print(marmara_creditloop_info)
print(e)
print("Something went wrong. Please check your input")
def marmara_settlement_tui(rpc_connection):
while True:
loop_txid = input("Input transaction ID of credit loop to make settlement: ")
try:
marmara_settlement_info = rpc_connection.marmarasettlement(loop_txid)
marmara_settlement_txid = rpc_connection.sendrawtransaction(marmara_settlement_info["hex"])
print("Loop " + loop_txid + " succesfully settled!\nSettlement txid: " + marmara_settlement_txid)
with open("settlement_txids.txt", "a+") as file:
file.write(marmara_settlement_txid + "\n")
file.write(json.dumps(marmara_settlement_info, indent=4, sort_keys=True) + "\n")
print("Transaction id is saved to settlement_txids.txt file.")
input("Press [Enter] to continue...")
break
except Exception as e:
print(marmara_settlement_info)
print(e)
print("Something went wrong. Please check your input")
input("Press [Enter] to continue...")
break
def marmara_lock_tui(rpc_connection):
while True:
amount = input("Input amount of coins you want to lock for settlement and staking: ")
unlock_height = input("Input height on which coins should be unlocked: ")
try:
marmara_lock_info = rpc_connection.marmaralock(amount, unlock_height)
marmara_lock_txid = rpc_connection.sendrawtransaction(marmara_lock_info["hex"])
with open("lock_txids.txt", "a+") as file:
file.write(marmara_lock_txid + "\n")
file.write(json.dumps(marmara_lock_info, indent=4, sort_keys=True) + "\n")
print("Transaction id is saved to lock_txids.txt file.")
input("Press [Enter] to continue...")
break
except Exception as e:
print(e)
print("Something went wrong. Please check your input")
input("Press [Enter] to continue...")
break
def marmara_info_tui(rpc_connection):
while True:
firstheight = input("Input first height (default 0): ")
if not firstheight:
firstheight = "0"
lastheight = input("Input last height (default current (0) ): ")
if not lastheight:
lastheight = "0"
minamount = input("Input min amount (default 0): ")
if not minamount:
minamount = "0"
maxamount = input("Input max amount (default 0): ")
if not maxamount:
maxamount = "0"
issuerpk = input("Optional. Input issuer public key: ")
try:
if issuerpk:
marmara_info = rpc_connection.marmarainfo(firstheight, lastheight, minamount, maxamount, "MARMARA", issuerpk)
else:
marmara_info = rpc_connection.marmarainfo(firstheight, lastheight, minamount, maxamount)
print(json.dumps(marmara_info, indent=4, sort_keys=True) + "\n")
input("Press [Enter] to continue...")
break
except Exception as e:
print(marmara_info)
print(e)
print("Something went wrong. Please check your input")
input("Press [Enter] to continue...")
break
def rogue_game_info(rpc_connection, game_txid):
game_info_arg = '"' + "[%22" + game_txid + "%22]" + '"'
game_info = rpc_connection.cclib("gameinfo", "17", game_info_arg)
return game_info
def rogue_game_register(rpc_connection, game_txid, player_txid = False):
if player_txid:
registration_info_arg = '"' + "[%22" + game_txid + "%22,%22" + player_txid + "%22]" + '"'
else:
registration_info_arg = '"' + "[%22" + game_txid + "%22]" + '"'
registration_info = rpc_connection.cclib("register", "17", registration_info_arg)
return registration_info
def rogue_pending(rpc_connection):
rogue_pending_list = rpc_connection.cclib("pending", "17")
return rogue_pending_list
def rogue_bailout(rpc_connection, game_txid):
bailout_info_arg = '"' + "[%22" + game_txid + "%22]" + '"'
bailout_info = rpc_connection.cclib("bailout", "17", bailout_info_arg)
return bailout_info
def rogue_highlander(rpc_connection, game_txid):
highlander_info_arg = '"' + "[%22" + game_txid + "%22]" + '"'
highlander_info = rpc_connection.cclib("highlander", "17", highlander_info_arg)
return highlander_info
def rogue_players_list(rpc_connection):
rogue_players_list = rpc_connection.cclib("players", "17")
return rogue_players_list
def rogue_player_info(rpc_connection, playertxid):
player_info_arg = '"' + "[%22" + playertxid + "%22]" + '"'
player_info = rpc_connection.cclib("playerinfo", "17", player_info_arg)
return player_info
def rogue_extract(rpc_connection, game_txid, pubkey):
extract_info_arg = '"' + "[%22" + game_txid + "%22,%22" + pubkey + "%22]" + '"'
extract_info = rpc_connection.cclib("extract", "17", extract_info_arg)
return extract_info
def rogue_keystrokes(rpc_connection, game_txid, keystroke):
rogue_keystrokes_arg = '"' + "[%22" + game_txid + "%22,%22" + keystroke + "%22]" + '"'
keystroke_info = rpc_connection.cclib("keystrokes", "17", rogue_keystrokes_arg)
return keystroke_info
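# Note on the cclib helpers above (descriptive comment): the cclib RPC expects its argument
# as a JSON array wrapped in quotes, with %22 standing in for the inner double quotes.
# For example, a hypothetical game txid "abc123" would be passed as
#   '"[%22abc123%22]"'
# which the daemon is expected to decode back into ["abc123"] before dispatching to the module.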
def print_multiplayer_games_list(rpc_connection):
while True:
pending_list = rogue_pending(rpc_connection)
multiplayer_pending_list = []
for game in pending_list["pending"]:
if rogue_game_info(rpc_connection, game)["maxplayers"] > 1:
multiplayer_pending_list.append(game)
print("Multiplayer games availiable to join: \n")
for active_multiplayer_game in multiplayer_pending_list:
game_info = rogue_game_info(rpc_connection, active_multiplayer_game)
print(colorize("\n================================\n", "green"))
print("Game txid: " + game_info["gametxid"])
print("Game buyin: " + str(game_info["buyin"]))
print("Game height: " + str(game_info["gameheight"]))
print("Start height: " + str(game_info["start"]))
print("Alive players: " + str(game_info["alive"]))
print("Registered players: " + str(game_info["numplayers"]))
print("Max players: " + str(game_info["maxplayers"]))
print(colorize("\n***\n", "blue"))
print("Players in game:")
for player in game_info["players"]:
print("Slot: " + str(player["slot"]))
if "baton" in player.keys():
print("Baton: " + str(player["baton"]))
if "tokenid" in player.keys():
print("Tokenid: " + str(player["tokenid"]))
print("Is mine?: " + str(player["ismine"]))
print(colorize("\nR + Enter - refresh list.\nE + Enter - to the game choice.\nCTRL + C - back to main menu", "blue"))
is_refresh = input("Choose your destiny: ")
if is_refresh == "R":
print("\n")
pass
elif is_refresh == "E":
print("\n")
break
else:
print("\nPlease choose R or E\n")
def rogue_newgame_singleplayer(rpc_connection, is_game_a_rogue=True):
try:
new_game_txid = rpc_connection.cclib("newgame", "17", "[1]")["txid"]
print("New singleplayer training game succesfully created. txid: " + new_game_txid)
while True:
mempool = rpc_connection.getrawmempool()
if new_game_txid in mempool:
print(colorize("Waiting for game transaction to be mined", "blue"))
time.sleep(5)
else:
print(colorize("Game transaction is mined", "green"))
break
players_list = rogue_players_list(rpc_connection)
if len(players_list["playerdata"]) > 0:
print_players_list(rpc_connection)
while True:
is_choice_needed = input("Do you want to choose a player for this game? [y/n] ")
if is_choice_needed == "y":
player_txid = input("Please input player txid: ")
newgame_regisration_txid = rogue_game_register(rpc_connection, new_game_txid, player_txid)["txid"]
break
elif is_choice_needed == "n":
set_warriors_name(rpc_connection)
newgame_regisration_txid = rogue_game_register(rpc_connection, new_game_txid)["txid"]
break
else:
print("Please choose y or n !")
else:
print("No players available to select")
input("Press [Enter] to continue...")
newgame_regisration_txid = rogue_game_register(rpc_connection, new_game_txid)["txid"]
while True:
mempool = rpc_connection.getrawmempool()
if newgame_regisration_txid in mempool:
print(colorize("Waiting for registration transaction to be mined", "blue"))
time.sleep(5)
else:
print(colorize("Registration transaction is mined", "green"))
break
game_info = rogue_game_info(rpc_connection, new_game_txid)
start_time = time.time()
while True:
if is_game_a_rogue:
subprocess.call(["cc/rogue/rogue", str(game_info["seed"]), str(game_info["gametxid"])])
else:
subprocess.call(["cc/games/tetris", str(game_info["seed"]), str(game_info["gametxid"])])
time_elapsed = time.time() - start_time
if time_elapsed > 1:
break
else:
print("Game less than 1 second. Trying to start again")
time.sleep(1)
game_end_height = int(rpc_connection.getinfo()["blocks"])
while True:
current_height = int(rpc_connection.getinfo()["blocks"])
height_difference = current_height - game_end_height
if height_difference == 0:
print(current_height)
print(game_end_height)
print(colorize("Waiting for next block before bailout", "blue"))
time.sleep(5)
else:
break
#print("\nKeystrokes of this game:\n")
#time.sleep(0.5)
while True:
keystrokes_rpc_responses = find_game_keystrokes_in_log(new_game_txid)[1::2]
if len(keystrokes_rpc_responses) < 1:
print("No keystrokes broadcasted yet. Let's wait 5 seconds")
time.sleep(5)
else:
break
#print(keystrokes_rpc_responses)
for keystroke in keystrokes_rpc_responses:
json_keystroke = json.loads(keystroke)["result"]
if "status" in json_keystroke.keys() and json_keystroke["status"] == "error":
while True:
print("Trying to re-brodcast keystroke")
keystroke_rebroadcast = rogue_keystrokes(rpc_connection, json_keystroke["gametxid"], json_keystroke["keystrokes"])
if "txid" in keystroke_rebroadcast.keys():
print("Keystroke broadcasted! txid: " + keystroke_rebroadcast["txid"])
break
else:
print("Let's try again in 5 seconds")
time.sleep(5)
# waiting for last keystroke confirmation here
last_keystroke_json = json.loads(keystrokes_rpc_responses[-1])
while True:
while True:
try:
rpc_connection.sendrawtransaction(last_keystroke_json["result"]["hex"])
except Exception as e:
pass
try:
confirmations_amount = rpc_connection.getrawtransaction(last_keystroke_json["result"]["txid"], 1)["confirmations"]
break
except Exception as e:
print(e)
print("Let's wait a little bit more")
time.sleep(5)
pass
if confirmations_amount < 2:
print("Last keystroke not confirmed yet! Let's wait a little")
time.sleep(10)
else:
print("Last keystroke confirmed!")
break
while True:
print("\nExtraction info:\n")
extraction_info = rogue_extract(rpc_connection, new_game_txid, rpc_connection.getinfo()["pubkey"])
if extraction_info["status"] == "error":
print(colorize("Your warrior died or no any information about game was saved on blockchain", "red"))
print("If warrior was alive - try to wait a little (choose n to wait for a next block). If he is dead - you can bailout now (choose y).")
else:
print("Current game state:")
print("Game txid: " + extraction_info["gametxid"])
print("Information about game saved on chain: " + extraction_info["extracted"])
print("\n")
is_bailout_needed = input("Do you want to make bailout now [y] or wait for one more block [n]? [y/n]: ")
if is_bailout_needed == "y":
bailout_info = rogue_bailout(rpc_connection, new_game_txid)
while True:
try:
confirmations_amount = rpc_connection.getrawtransaction(bailout_info["txid"], 1)["confirmations"]
break
except Exception as e:
print(e)
print("Bailout not on blockchain yet. Let's wait a little bit more")
time.sleep(20)
pass
break
elif is_bailout_needed == "n":
game_end_height = int(rpc_connection.getinfo()["blocks"])
while True:
current_height = int(rpc_connection.getinfo()["blocks"])
height_difference = current_height - game_end_height
if height_difference == 0:
print(current_height)
print(game_end_height)
print(colorize("Waiting for next block before bailout", "blue"))
time.sleep(5)
else:
break
else:
print("Please choose y or n !")
print(bailout_info)
print("\nGame is finished!\n")
bailout_txid = bailout_info["txid"]
input("Press [Enter] to continue...")
except Exception as e:
print("Something went wrong.")
print(e)
input("Press [Enter] to continue...")
def play_multiplayer_game(rpc_connection):
# printing list of user active multiplayer games
active_games_list = rpc_connection.cclib("games", "17")["games"]
active_multiplayer_games_list = []
for game in active_games_list:
gameinfo = rogue_game_info(rpc_connection, game)
if gameinfo["maxplayers"] > 1:
active_multiplayer_games_list.append(gameinfo)
games_counter = 0
for active_multiplayer_game in active_multiplayer_games_list:
games_counter = games_counter + 1
is_ready_to_start = False
try:
active_multiplayer_game["seed"]
is_ready_to_start = True
except Exception as e:
pass
print(colorize("\n================================\n", "green"))
print("Game txid: " + active_multiplayer_game["gametxid"])
print("Game buyin: " + str(active_multiplayer_game["buyin"]))
if is_ready_to_start:
print(colorize("Ready for start!", "green"))
else:
print(colorize("Not ready for start yet, wait until start height!", "red"))
print("Game height: " + str(active_multiplayer_game["gameheight"]))
print("Start height: " + str(active_multiplayer_game["start"]))
print("Alive players: " + str(active_multiplayer_game["alive"]))
print("Registered players: " + str(active_multiplayer_game["numplayers"]))
print("Max players: " + str(active_multiplayer_game["maxplayers"]))
print(colorize("\n***\n", "blue"))
print("Players in game:")
for player in active_multiplayer_game["players"]:
print("Slot: " + str(player["slot"]))
print("Baton: " + str(player["baton"]))
print("Tokenid: " + str(player["tokenid"]))
print("Is mine?: " + str(player["ismine"]))
# asking user if he want to start any of them
while True:
start_game = input("\nDo you want to start any of your pendning multiplayer games?[y/n]: ")
if start_game == "y":
new_game_txid = input("Input txid of game which you want to start: ")
game_info = rogue_game_info(rpc_connection, new_game_txid)
try:
start_time = time.time()
while True:
subprocess.call(["cc/rogue/rogue", str(game_info["seed"]), str(game_info["gametxid"])])
time_elapsed = time.time() - start_time
if time_elapsed > 1:
break
else:
print("Game less than 1 second. Trying to start again")
time.sleep(1)
except Exception as e:
print("Maybe game isn't ready for start yet or your input was not correct, sorry.")
input("Press [Enter] to continue...")
break
game_end_height = int(rpc_connection.getinfo()["blocks"])
while True:
current_height = int(rpc_connection.getinfo()["blocks"])
height_difference = current_height - game_end_height
if height_difference == 0:
print(current_height)
print(game_end_height)
print(colorize("Waiting for next block before bailout or highlander", "blue"))
time.sleep(5)
else:
break
while True:
keystrokes_rpc_responses = find_game_keystrokes_in_log(new_game_txid)[1::2]
if len(keystrokes_rpc_responses) < 1:
print("No keystrokes broadcasted yet. Let's wait 5 seconds")
time.sleep(5)
else:
break
for keystroke in keystrokes_rpc_responses:
json_keystroke = json.loads(keystroke)["result"]
if "status" in json_keystroke.keys() and json_keystroke["status"] == "error":
while True:
print("Trying to re-brodcast keystroke")
keystroke_rebroadcast = rogue_keystrokes(rpc_connection, json_keystroke["gametxid"],
json_keystroke["keystrokes"])
if "txid" in keystroke_rebroadcast.keys():
print("Keystroke broadcasted! txid: " + keystroke_rebroadcast["txid"])
break
else:
print("Let's try again in 5 seconds")
time.sleep(5)
last_keystroke_json = json.loads(keystrokes_rpc_responses[-1])
while True:
while True:
try:
confirmations_amount = rpc_connection.getrawtransaction(last_keystroke_json["result"]["txid"], 1)["confirmations"]
break
except Exception as e:
print(e)
print("Let's wait a little bit more")
rpc_connection.sendrawtransaction(last_keystroke_json["result"]["hex"])
time.sleep(5)
pass
if confirmations_amount < 2:
print("Last keystroke not confirmed yet! Let's wait a little")
time.sleep(10)
else:
print("Last keystroke confirmed!")
break
while True:
print("\nExtraction info:\n")
extraction_info = rogue_extract(rpc_connection, new_game_txid, rpc_connection.getinfo()["pubkey"])
if extraction_info["status"] == "error":
print(colorize("Your warrior died or no any information about game was saved on blockchain", "red"))
print("If warrior was alive - try to wait a little (choose n to wait for a next block). If he is dead - you can bailout now (choose y).")
else:
print("Current game state:")
print("Game txid: " + extraction_info["gametxid"])
print("Information about game saved on chain: " + extraction_info["extracted"])
print("\n")
is_bailout_needed = input(
"Do you want to make bailout now [y] or wait for one more block [n]? [y/n]: ")
if is_bailout_needed == "y":
if game_info["alive"] > 1:
bailout_info = rogue_bailout(rpc_connection, new_game_txid)
try:
bailout_txid = bailout_info["txid"]
print(bailout_info)
print("\nGame is finished!\n")
input("Press [Enter] to continue...")
break
except Exception:
highlander_info = rogue_highlander(rpc_connection, new_game_txid)
highlander_info = highlander_info["txid"]
print(highlander_info)
print("\nGame is finished!\n")
input("Press [Enter] to continue...")
break
else:
highlander_info = rogue_highlander(rpc_connection, new_game_txid)
if 'error' in highlander_info.keys() and highlander_info["error"] == 'numplayers != maxplayers':
bailout_info = rogue_bailout(rpc_connection, new_game_txid)
print(bailout_info)
print("\nGame is finished!\n")
input("Press [Enter] to continue...")
break
else:
print(highlander_info)
print("\nGame is finished!\n")
input("Press [Enter] to continue...")
break
elif is_bailout_needed == "n":
game_end_height = int(rpc_connection.getinfo()["blocks"])
while True:
current_height = int(rpc_connection.getinfo()["blocks"])
height_difference = current_height - game_end_height
if height_difference == 0:
print(current_height)
print(game_end_height)
print(colorize("Waiting for next block before bailout", "blue"))
time.sleep(5)
else:
break
break
break
if start_game == "n":
print("As you wish!")
input("Press [Enter] to continue...")
break
else:
print(colorize("Choose y or n!", "red"))
def rogue_newgame_multiplayer(rpc_connection):
while True:
max_players = input("Input game max. players (>1): ")
if int(max_players) > 1:
break
else:
print("Please re-check your input")
input("Press [Enter] to continue...")
while True:
buyin = input("Input game buyin (>0.001): ")
if float(buyin) > 0.001:
break
else:
print("Please re-check your input")
input("Press [Enter] to continue...")
try:
new_game_txid = rpc_connection.cclib("newgame", "17", '"[' + max_players + "," + buyin + ']"')["txid"]
print(colorize("New multiplayer game succesfully created. txid: " + new_game_txid, "green"))
input("Press [Enter] to continue...")
except Exception as e:
print("Something went wrong.")
print(e)
input("Press [Enter] to continue...")
def rogue_join_multiplayer_game(rpc_connection):
while True:
try:
print_multiplayer_games_list(rpc_connection)
# TODO: optional player data txid (print players you have and ask if you want to choose one)
game_txid = input("Input txid of game you want to join: ")
try:
while True:
print_players_list(rpc_connection)
is_choice_needed = input("Do you want to choose a player for this game? [y/n] ")
if is_choice_needed == "y":
player_txid = input("Please input player txid: ")
newgame_regisration_txid = rogue_game_register(rpc_connection, game_txid, player_txid)["txid"]
break
elif is_choice_needed == "n":
set_warriors_name(rpc_connection)
newgame_regisration_txid = rogue_game_register(rpc_connection, game_txid)["txid"]
break
else:
print("Please choose y or n !")
except Exception as e:
print("Something went wrong. Maybe you're trying to register on game twice or don't have enough funds to pay buyin.")
print(e)
input("Press [Enter] to continue...")
break
print(colorize("Succesfully registered.", "green"))
while True:
mempool = rpc_connection.getrawmempool()
if newgame_regisration_txid in mempool:
print(colorize("Waiting for registration transaction to be mined", "blue"))
time.sleep(5)
else:
print(colorize("Registration transaction is mined", "green"))
break
print(newgame_regisration_txid)
input("Press [Enter] to continue...")
break
except KeyboardInterrupt:
break
def print_players_list(rpc_connection):
players_list = rogue_players_list(rpc_connection)
print(colorize("\nYou own " + str(players_list["numplayerdata"]) + " warriors\n", "blue"))
warrior_counter = 0
for player in players_list["playerdata"]:
warrior_counter = warrior_counter + 1
player_data = rogue_player_info(rpc_connection, player)["player"]
print(colorize("\n================================\n","green"))
print("Warrior " + str(warrior_counter))
print("Name: " + player_data["pname"] + "\n")
print("Player txid: " + player_data["playertxid"])
print("Token txid: " + player_data["tokenid"])
print("Hitpoints: " + str(player_data["hitpoints"]))
print("Strength: " + str(player_data["strength"]))
print("Level: " + str(player_data["level"]))
print("Experience: " + str(player_data["experience"]))
print("Dungeon Level: " + str(player_data["dungeonlevel"]))
print("Chain: " + player_data["chain"])
print(colorize("\nInventory:\n","blue"))
for item in player_data["pack"]:
print(item)
print("\nTotal packsize: " + str(player_data["packsize"]) + "\n")
input("Press [Enter] to continue...")
def sell_warrior(rpc_connection):
print(colorize("Your brave warriors: \n", "blue"))
print_players_list(rpc_connection)
print("\n")
while True:
need_sell = input("Do you want to place order to sell any? [y/n]: ")
if need_sell == "y":
playertxid = input("Input playertxid of warrior you want to sell: ")
price = input("Input price (in ROGUE coins) you want to sell warrior for: ")
try:
tokenid = rogue_player_info(rpc_connection, playertxid)["player"]["tokenid"]
except Exception as e:
print(e)
print("Something went wrong. Be careful with input next time.")
input("Press [Enter] to continue...")
break
token_ask_raw = rpc_connection.tokenask("1", tokenid, price)
try:
token_ask_txid = rpc_connection.sendrawtransaction(token_ask_raw["hex"])
except Exception as e:
print(e)
print(token_ask_raw)
print("Something went wrong. Be careful with input next time.")
input("Press [Enter] to continue...")
break
print(colorize("Ask succesfully placed. Ask txid is: " + token_ask_txid, "green"))
input("Press [Enter] to continue...")
break
if need_sell == "n":
print("As you wish!")
input("Press [Enter] to continue...")
break
else:
print(colorize("Choose y or n!", "red"))
#TODO: have to combine into single scanner with different cases
def is_warrior_alive(rpc_connection, warrior_txid):
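# A warrior is treated as alive while its 1-satoshi (0.00000001) marker output is still
# unspent; the gettxout() probe below returns nothing once that output has been spent.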
warrior_alive = False
raw_transaction = rpc_connection.getrawtransaction(warrior_txid, 1)
for vout in raw_transaction["vout"]:
if vout["value"] == 0.00000001 and rpc_connection.gettxout(raw_transaction["txid"], vout["n"]):
warrior_alive = True
return warrior_alive
def warriors_scanner(rpc_connection):
start_time = time.time()
token_list = rpc_connection.tokenlist()
my_warriors_list = rogue_players_list(rpc_connection)
warriors_list = {}
for token in token_list:
player_info = rogue_player_info(rpc_connection, token)
if "status" in player_info and player_info["status"] == "error":
pass
elif player_info["player"]["playertxid"] in my_warriors_list["playerdata"]:
pass
elif not is_warrior_alive(rpc_connection, player_info["player"]["playertxid"]):
pass
else:
warriors_list[token] = player_info["player"]
print("--- %s seconds ---" % (time.time() - start_time))
return warriors_list
def warriors_scanner_for_rating(rpc_connection):
print("It can take some time")
token_list = rpc_connection.tokenlist()
my_warriors_list = rogue_players_list(rpc_connection)
actual_playerids = []
warriors_list = {}
for token in token_list:
player_info = rogue_player_info(rpc_connection, token)
if "status" in player_info and player_info["status"] == "error":
pass
else:
while True:
if "batontxid" in player_info["player"].keys():
player_info = rogue_player_info(rpc_connection, player_info["player"]["batontxid"])
else:
actual_playerids.append(player_info["player"]["playertxid"])
break
for player_id in actual_playerids:
player_info = rogue_player_info(rpc_connection, player_id)
if not is_warrior_alive(rpc_connection, player_info["player"]["playertxid"]):
pass
else:
warriors_list[player_id] = player_info["player"]
return warriors_list
def warriors_scanner_for_dex(rpc_connection):
start_time = time.time()
token_list = rpc_connection.tokenlist()
my_warriors_list = rogue_players_list(rpc_connection)
warriors_list = {}
for token in token_list:
player_info = rogue_player_info(rpc_connection, token)
if "status" in player_info and player_info["status"] == "error":
pass
elif player_info["player"]["tokenid"] in my_warriors_list["playerdata"]:
pass
else:
warriors_list[token] = player_info["player"]
print("--- %s seconds ---" % (time.time() - start_time))
return warriors_list
def print_warrior_list(rpc_connection):
players_list = warriors_scanner(rpc_connection)
print(colorize("All warriors on ROGUE chain: \n", "blue"))
warrior_counter = 0
for player in players_list:
warrior_counter = warrior_counter + 1
player_data = rogue_player_info(rpc_connection, player)["player"]
print(colorize("\n================================\n","green"))
print("Warrior " + str(warrior_counter))
print("Name: " + player_data["pname"] + "\n")
print("Player txid: " + player_data["playertxid"])
print("Token txid: " + player_data["tokenid"])
print("Hitpoints: " + str(player_data["hitpoints"]))
print("Strength: " + str(player_data["strength"]))
print("Level: " + str(player_data["level"]))
print("Experience: " + str(player_data["experience"]))
print("Dungeon Level: " + str(player_data["dungeonlevel"]))
print("Chain: " + player_data["chain"])
print(colorize("\nInventory:\n","blue"))
for item in player_data["pack"]:
print(item)
print("\nTotal packsize: " + str(player_data["packsize"]) + "\n")
input("Press [Enter] to continue...")
def place_bid_on_warriror(rpc_connection):
warriors_list = print_warrior_list(rpc_connection)
# TODO: have to drop my warriors or at least print my warriors ids
while True:
need_buy = input("Do you want to place order to buy some warrior? [y/n]: ")
if need_buy == "y":
playertxid = input("Input playertxid of warrior you want to place bid for: ")
price = input("Input price (in ROGUE coins) you want to buy warrior for: ")
tokenid = rogue_player_info(rpc_connection, playertxid)["player"]["tokenid"]
token_bid_raw = rpc_connection.tokenbid("1", tokenid, price)
try:
token_bid_txid = rpc_connection.sendrawtransaction(token_bid_raw["hex"])
except Exception as e:
print(e)
print(token_bid_raw)
print("Something went wrong. Be careful with input next time.")
input("Press [Enter] to continue...")
break
print(colorize("Bid succesfully placed. Bid txid is: " + token_bid_txid, "green"))
input("Press [Enter] to continue...")
break
if need_buy == "n":
print("As you wish!")
input("Press [Enter] to continue...")
break
else:
print(colorize("Choose y or n!", "red"))
def check_incoming_bids(rpc_connection):
# TODO: have to scan for warriors which are in asks as well
players_list = rogue_players_list(rpc_connection)
incoming_orders = []
for player in players_list["playerdata"]:
token_id = rogue_player_info(rpc_connection, player)["player"]["tokenid"]
orders = rpc_connection.tokenorders(token_id)
if len(orders) > 0:
for order in orders:
if order["funcid"] == "b":
incoming_orders.append(order)
return incoming_orders
def print_icoming_bids(rpc_connection):
incoming_bids = check_incoming_bids(rpc_connection)
for bid in incoming_bids:
print("Recieved bid for warrior " + bid["tokenid"])
player_data = rogue_player_info(rpc_connection, bid["tokenid"])["player"]
print(colorize("\n================================\n", "green"))
print("Name: " + player_data["pname"] + "\n")
print("Player txid: " + player_data["playertxid"])
print("Token txid: " + player_data["tokenid"])
print("Hitpoints: " + str(player_data["hitpoints"]))
print("Strength: " + str(player_data["strength"]))
print("Level: " + str(player_data["level"]))
print("Experience: " + str(player_data["experience"]))
print("Dungeon Level: " + str(player_data["dungeonlevel"]))
print("Chain: " + player_data["chain"])
print(colorize("\nInventory:\n", "blue"))
for item in player_data["pack"]:
print(item)
print("\nTotal packsize: " + str(player_data["packsize"]) + "\n")
print(colorize("\n================================\n", "blue"))
print("Order info: \n")
print("Bid txid: " + bid["txid"])
print("Price: " + str(bid["price"]) + "\n")
if len(incoming_bids) == 0:
print(colorize("There is no any incoming orders!", "blue"))
input("Press [Enter] to continue...")
else:
while True:
want_to_sell = input("Do you want to fill any incoming bid? [y/n]: ")
if want_to_sell == "y":
bid_txid = input("Input bid txid you want to fill: ")
for bid in incoming_bids:
if bid_txid == bid["txid"]:
tokenid = bid["tokenid"]
fill_sum = bid["totalrequired"]
fillbid_hex = rpc_connection.tokenfillbid(tokenid, bid_txid, str(fill_sum))
try:
fillbid_txid = rpc_connection.sendrawtransaction(fillbid_hex["hex"])
except Exception as e:
print(e)
print(fillbid_hex)
print("Something went wrong. Be careful with input next time.")
input("Press [Enter] to continue...")
break
print(colorize("Warrior succesfully sold. Txid is: " + fillbid_txid, "green"))
input("Press [Enter] to continue...")
break
if want_to_sell == "n":
print("As you wish!")
input("Press [Enter] to continue...")
break
else:
print(colorize("Choose y or n!", "red"))
def find_warriors_asks(rpc_connection):
warriors_list = warriors_scanner_for_dex(rpc_connection)
warriors_asks = []
for player in warriors_list:
orders = rpc_connection.tokenorders(player)
if len(orders) > 0:
for order in orders:
if order["funcid"] == "s":
warriors_asks.append(order)
for ask in warriors_asks:
print(colorize("\n================================\n", "green"))
print("Warrior selling on marketplace: " + ask["tokenid"])
player_data = rogue_player_info(rpc_connection, ask["tokenid"])["player"]
print("Name: " + player_data["pname"] + "\n")
print("Player txid: " + player_data["playertxid"])
print("Token txid: " + player_data["tokenid"])
print("Hitpoints: " + str(player_data["hitpoints"]))
print("Strength: " + str(player_data["strength"]))
print("Level: " + str(player_data["level"]))
print("Experience: " + str(player_data["experience"]))
print("Dungeon Level: " + str(player_data["dungeonlevel"]))
print("Chain: " + player_data["chain"])
print(colorize("\nInventory:\n", "blue"))
for item in player_data["pack"]:
print(item)
print("\nTotal packsize: " + str(player_data["packsize"]) + "\n")
print(colorize("Order info: \n", "red"))
print("Ask txid: " + ask["txid"])
print("Price: " + str(ask["price"]) + "\n")
while True:
want_to_buy = input("Do you want to buy any warrior? [y/n]: ")
if want_to_buy == "y":
ask_txid = input("Input asktxid which you want to fill: ")
for ask in warriors_asks:
if ask_txid == ask["txid"]:
tokenid = ask["tokenid"]
try:
fillask_raw = rpc_connection.tokenfillask(tokenid, ask_txid, "1")
except Exception as e:
print("Something went wrong. Be careful with input next time.")
input("Press [Enter] to continue...")
break
try:
fillask_txid = rpc_connection.sendrawtransaction(fillask_raw["hex"])
except Exception as e:
print(e)
print(fillask_raw)
print("Something went wrong. Be careful with input next time.")
input("Press [Enter] to continue...")
break
print(colorize("Warrior succesfully bought. Txid is: " + fillask_txid, "green"))
input("Press [Enter] to continue...")
break
if want_to_buy == "n":
print("As you wish!")
input("Press [Enter] to continue...")
break
else:
print(colorize("Choose y or n!", "red"))
def warriors_orders_check(rpc_connection):
my_orders_list = rpc_connection.mytokenorders("17")
warriors_orders = {}
for order in my_orders_list:
player_info = rogue_player_info(rpc_connection, order["tokenid"])
if "status" in player_info and player_info["status"] == "error":
pass
else:
warriors_orders[order["tokenid"]] = order
bids_list = []
asks_list = []
for order in warriors_orders:
if warriors_orders[order]["funcid"] == "s":
asks_list.append(warriors_orders[order])
else:
bids_list.append(warriors_orders[order])  # append the order dict so bid["txid"] etc. work below
print(colorize("\nYour asks:\n", "blue"))
print(colorize("\n********************************\n", "red"))
for ask in asks_list:
print("txid: " + ask["txid"])
print("Price: " + ask["price"])
print("Warrior tokenid: " + ask["tokenid"])
print(colorize("\n================================\n", "green"))
print("Warrior selling on marketplace: " + ask["tokenid"])
player_data = rogue_player_info(rpc_connection, ask["tokenid"])["player"]
print("Name: " + player_data["pname"] + "\n")
print("Player txid: " + player_data["playertxid"])
print("Token txid: " + player_data["tokenid"])
print("Hitpoints: " + str(player_data["hitpoints"]))
print("Strength: " + str(player_data["strength"]))
print("Level: " + str(player_data["level"]))
print("Experience: " + str(player_data["experience"]))
print("Dungeon Level: " + str(player_data["dungeonlevel"]))
print("Chain: " + player_data["chain"])
print(colorize("\nInventory:\n", "blue"))
for item in player_data["pack"]:
print(item)
print("\nTotal packsize: " + str(player_data["packsize"]) + "\n")
print(colorize("\n================================\n", "green"))
print(colorize("\nYour bids:\n", "blue"))
print(colorize("\n********************************\n", "red"))
for bid in bids_list:
print("txid: " + bid["txid"])
print("Price: " + bid["price"])
print("Warrior tokenid: " + bid["tokenid"])
print(colorize("\n================================\n", "green"))
print("Warrior selling on marketplace: " + bid["tokenid"])
player_data = rogue_player_info(rpc_connection, bid["tokenid"])["player"]
print("Name: " + player_data["pname"] + "\n")
print("Player txid: " + player_data["playertxid"])
print("Token txid: " + player_data["tokenid"])
print("Hitpoints: " + str(player_data["hitpoints"]))
print("Strength: " + str(player_data["strength"]))
print("Level: " + str(player_data["level"]))
print("Experience: " + str(player_data["experience"]))
print("Dungeon Level: " + str(player_data["dungeonlevel"]))
print("Chain: " + player_data["chain"])
print(colorize("\nInventory:\n", "blue"))
for item in player_data["pack"]:
print(item)
print("\nTotal packsize: " + str(player_data["packsize"]) + "\n")
print(colorize("\n================================\n", "green"))
while True:
need_order_change = input("Do you want to cancel any of your orders? [y/n]: ")
if need_order_change == "y":
while True:
ask_or_bid = input("Do you want cancel ask or bid? [a/b]: ")
if ask_or_bid == "a":
ask_txid = input("Input txid of ask you want to cancel: ")
warrior_tokenid = input("Input warrior token id for this ask: ")
try:
ask_cancellation_hex = rpc_connection.tokencancelask(warrior_tokenid, ask_txid)
ask_cancellation_txid = rpc_connection.sendrawtransaction(ask_cancellation_hex["hex"])
except Exception as e:
print(colorize("Please re-check your input!", "red"))
else:
print(colorize("Ask successfully cancelled. Cancellation txid: " + ask_cancellation_txid, "green"))
break
if ask_or_bid == "b":
bid_txid = input("Input txid of bid you want to cancel: ")
warrior_tokenid = input("Input warrior token id for this bid: ")
try:
bid_cancellation_hex = rpc_connection.tokencancelbid(warrior_tokenid, bid_txid)
bid_cancellation_txid = rpc_connection.sendrawtransaction(bid_cancellation_hex["hex"])
except Exception as e:
print(colorize("Please re-check your input!", "red"))
else:
print(colorize("Bid successfully cancelled. Cancellation txid: " + bid_cancellation_txid, "green"))
break
else:
print(colorize("Choose a or b!", "red"))
input("Press [Enter] to continue...")
break
if need_order_change == "n":
print("As you wish!")
input("Press [Enter] to continue...")
break
else:
print(colorize("Choose y or n!", "red"))
def set_warriors_name(rpc_connection):
warriors_name = input("What warrior name do you want for legends and tales about your brave adventures?: ")
warrior_name_arg = '"' + "[%22" + warriors_name + "%22]" + '"'
set_name_status = rpc_connection.cclib("setname", "17", warrior_name_arg)
print(colorize("Warrior name succesfully set", "green"))
print("Result: " + set_name_status["result"])
print("Name: " + set_name_status["pname"])
input("Press [Enter] to continue...")
def top_warriors_rating(rpc_connection):
start_time = time.time()
warriors_list = warriors_scanner_for_rating(rpc_connection)
warriors_exp = {}
for warrior in warriors_list:
warriors_exp[warrior] = warriors_list[warrior]["experience"]
warriors_exp_sorted = {}
temp = [(k, warriors_exp[k]) for k in sorted(warriors_exp, key=warriors_exp.get, reverse=True)]
for k,v in temp:
warriors_exp_sorted[k] = v
counter = 0
for experienced_warrior in warriors_exp_sorted:
if counter < 20:
counter = counter + 1
print("\n" + str(counter) + " place.")
print(colorize("\n================================\n", "blue"))
player_data = rogue_player_info(rpc_connection, experienced_warrior)["player"]
print("Name: " + player_data["pname"] + "\n")
print("Player txid: " + player_data["playertxid"])
print("Token txid: " + player_data["tokenid"])
print("Hitpoints: " + str(player_data["hitpoints"]))
print("Strength: " + str(player_data["strength"]))
print("Level: " + str(player_data["level"]))
print("Experience: " + str(player_data["experience"]))
print("Dungeon Level: " + str(player_data["dungeonlevel"]))
print("Chain: " + player_data["chain"])
print("--- %s seconds ---" % (time.time() - start_time))
input("Press [Enter] to continue...")
def exit():
sys.exit()
def warrior_trasnfer(rpc_connection):
print(colorize("Your brave warriors: \n", "blue"))
print_players_list(rpc_connection)
print("\n")
while True:
need_transfer = input("Do you want to transfer any warrior? [y/n]: ")
if need_transfer == "y":
warrior_tokenid = input("Input warrior tokenid: ")
recipient_pubkey = input("Input recipient pubkey: ")
try:
token_transfer_hex = rpc_connection.tokentransfer(warrior_tokenid, recipient_pubkey, "1")
token_transfer_txid = rpc_connection.sendrawtransaction(token_transfer_hex["hex"])
except Exception as e:
print(e)
print("Something went wrong. Please be careful with your input next time!")
input("Press [Enter] to continue...")
break
print(colorize("Warrior succesfully transferred! Transfer txid: " + token_transfer_txid, "green"))
input("Press [Enter] to continue...")
break
if need_transfer == "n":
print("As you wish!")
input("Press [Enter] to continue...")
break
else:
print(colorize("Choose y or n!", "red"))
def check_if_config_is_here(rpc_connection, assetchain_name):
config_name = assetchain_name + ".conf"
if os.path.exists(config_name):
print(colorize("Config is already in daemon folder", "green"))
else:
if operating_system == 'Darwin':
path_to_config = os.environ['HOME'] + '/Library/Application Support/Komodo/' + assetchain_name + '/' + config_name
elif operating_system == 'Linux':
path_to_config = os.environ['HOME'] + '/.komodo/' + assetchain_name + '/' + config_name
elif operating_system == 'Win64' or operating_system == 'Windows':
path_to_config = '%s/komodo/%s/%s' % (os.environ['APPDATA'], assetchain_name, config_name)
try:
copy(path_to_config, os.getcwd())
except Exception as e:
print(e)
print("Can't copy config to current daemon directory automatically by some reason.")
print("Please copy it manually. It's locating here: " + path_to_config)
def find_game_keystrokes_in_log(gametxid):
operating_system = platform.system()
if operating_system == 'Win64' or operating_system == 'Windows':
p1 = subprocess.Popen(["type", "keystrokes.log"], stdout=subprocess.PIPE, shell=True)
p2 = subprocess.Popen(["findstr", gametxid], stdin=p1.stdout, stdout=subprocess.PIPE, shell=True)
else:
p1 = subprocess.Popen(["cat", "keystrokes.log"], stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", gametxid], stdin=p1.stdout, stdout=subprocess.PIPE)
p1.stdout.close()
output = p2.communicate()[0]
keystrokes_log_for_game = bytes.decode(output).split("\n")
return keystrokes_log_for_game
def check_if_tx_in_mempool(rpc_connection, txid):
while True:
mempool = rpc_connection.getrawmempool()
if txid in mempool:
print(colorize("Waiting for " + txid + " transaction to be mined", "blue"))
time.sleep(5)
else:
print(colorize("Transaction is mined", "green"))
break
|
py | 1a3bc98fe47c8b0a61bb3e30f71760adb942a24b | # Generated by Django 3.2 on 2021-04-14 02:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('spotify', '0004_auto_20210413_1843'),
]
operations = [
migrations.RemoveField(
model_name='artist',
name='followers_href',
),
migrations.RemoveField(
model_name='artist',
name='genres',
),
migrations.RemoveField(
model_name='artist',
name='href',
),
migrations.RemoveField(
model_name='artist',
name='images',
),
migrations.AddField(
model_name='artist',
name='image',
field=models.CharField(max_length=255, null=True),
),
migrations.AlterField(
model_name='artist',
name='spotify_id',
field=models.CharField(max_length=255, null=True),
),
]
|
py | 1a3bca38165fc8a59336eb0c60d0c9ac6c604251 | import hashlib
flag = 0
pass_hash = input("Please Enter the hash you want to crack :")
optn = input("Do you want to Give a Custom Dictionary(Y/n)")
if (optn=="Y") or (optn=="y") or (optn=="yes"):
pass_file = input("enter the file name :")
try:
pass_file = open(pass_file, "r")
except:
print("No file found")
quit()
for word in pass_file:
enc_word = word.encode('utf-8')
digest = hashlib.sha512(enc_word.strip()).hexdigest()
print(word)
print(digest)
print(pass_hash)
if digest == pass_hash:
print("Password has been found: " + word)
flag = 1
break
elif (optn=="N") or (optn=="n") or (optn=="no"):
pass_file = "/usr/share/wordlists/rockyou.txt"
try:
pass_file = open(pass_file, "r")
except:
print("No file found")
quit()
for word in pass_file:
enc_word = word.encode('utf-8')
digest = hashlib.sha512(enc_word.strip()).hexdigest()
print(word)
print(digest)
print(pass_hash)
if digest == pass_hash:
print("Password has been found: " + word)
flag = 1
break
if flag == 0:
print("Password is not in the list")
|
py | 1a3bca9d7416d1dbc6ba71331e23f3214ecf216c | """Define a RainMachine controller class."""
# pylint: disable=too-few-public-methods,too-many-instance-attributes
from datetime import datetime, timedelta
from typing import Awaitable, Callable, Optional
from regenmaschine.api import API
from regenmaschine.diagnostics import Diagnostics
from regenmaschine.parser import Parser
from regenmaschine.program import Program
from regenmaschine.provision import Provision
from regenmaschine.restriction import Restriction
from regenmaschine.stats import Stats
from regenmaschine.watering import Watering
from regenmaschine.zone import Zone
URL_BASE_LOCAL: str = "https://{0}:{1}/api/4"
URL_BASE_REMOTE: str = "https://api.rainmachine.com/{0}/api/4"
class Controller: # pylint: disable=too-many-instance-attributes
"""Define the controller."""
def __init__(self, request: Callable[..., Awaitable[dict]]) -> None:
"""Initialize."""
self._access_token: Optional[str] = None
self._access_token_expiration: Optional[datetime] = None
self._client_request: Callable[..., Awaitable[dict]] = request
self._host: Optional[str] = None
self._ssl: bool = True
self.api_version: Optional[str] = None
self.hardware_version: Optional[int] = None
self.mac: Optional[str] = None
self.name: Optional[str] = None
self.software_version: Optional[str] = None
# API endpoints:
self.api: API = API(self._request)
self.diagnostics: Diagnostics = Diagnostics(self._request)
self.parsers: Parser = Parser(self._request)
self.programs: Program = Program(self._request)
self.provisioning: Provision = Provision(self._request)
self.restrictions: Restriction = Restriction(self._request)
self.stats: Stats = Stats(self._request)
self.watering: Watering = Watering(self._request)
self.zones: Zone = Zone(self._request)
async def _request(
self,
method: str,
endpoint: str,
*,
headers: Optional[dict] = None,
params: Optional[dict] = None,
json: Optional[dict] = None,
ssl: bool = True,
) -> dict:
"""Wrap the generic request method to add access token, etc."""
return await self._client_request(
method,
f"{self._host}/{endpoint}",
access_token=self._access_token,
access_token_expiration=self._access_token_expiration,
headers=headers,
params=params,
json=json,
ssl=ssl,
)
class LocalController(Controller):
"""Define a controller accessed over the LAN."""
def __init__( # pylint: disable=too-many-arguments
self, request: Callable[..., Awaitable[dict]], host: str, port: int, ssl: bool
) -> None:
"""Initialize."""
super().__init__(request)
self._host: str = URL_BASE_LOCAL.format(host, port)
self._ssl: bool = ssl
async def login(self, password):
"""Authenticate against the device (locally)."""
auth_resp: dict = await self._client_request(
"post", f"{self._host}/auth/login", json={"pwd": password, "remember": 1}
)
self._access_token: str = auth_resp["access_token"]
self._access_token_expiration: datetime = datetime.now() + timedelta(
seconds=int(auth_resp["expires_in"]) - 10
)
class RemoteController(Controller):
"""Define a controller accessed over RainMachine's cloud."""
async def login(
self, stage_1_access_token: str, sprinkler_id: str, password: str
) -> None:
"""Authenticate against the device (remotely)."""
auth_resp: dict = await self._client_request(
"post",
"https://my.rainmachine.com/devices/login-sprinkler",
access_token=stage_1_access_token,
json={"sprinklerId": sprinkler_id, "pwd": password},
)
self._access_token: str = auth_resp["access_token"]
self._host: str = URL_BASE_REMOTE.format(sprinkler_id)
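# --- Illustrative usage sketch (an assumption, not part of the library API) ---
# `request` stands in for any coroutine matching the Callable[..., Awaitable[dict]] shape that
# the real client normally injects; the host, port and password below are placeholders.
async def _example_local_login(request: Callable[..., Awaitable[dict]]) -> Controller:
    """Sketch: build a LAN controller and authenticate with the device password."""
    controller = LocalController(request, "192.168.1.100", 8080, True)
    # login() stores the access token and its expiration on the controller
    await controller.login("device-password")
    return controller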
|
py | 1a3bcaa487e00994cb0ffb4b238f1fa75ce8d39d | import subprocess
import threading
import platform
import socket
import os
from electrum import constants
from electrum.plugin import BasePlugin, hook
from electrum.i18n import _
from electrum.util import UserFacingException
from electrum.logging import get_logger
from electrum.network import Network
_logger = get_logger('plugins.bwt')
plugin_dir = os.path.dirname(__file__)
bwt_bin = os.path.join(plugin_dir, 'bwt')
if platform.system() == 'Windows':
bwt_bin = '%s.exe' % bwt_bin
class BwtPlugin(BasePlugin):
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.proc = None
self.wallets = set()
self.enabled = config.get('bwt_enabled')
self.bitcoind_url = config.get('bwt_bitcoind_url', default_bitcoind_url())
self.bitcoind_dir = config.get('bwt_bitcoind_dir', default_bitcoind_dir())
self.bitcoind_wallet = config.get('bwt_bitcoind_wallet')
self.bitcoind_cred = config.get('bwt_bitcoind_cred')
self.rescan_since = config.get('bwt_rescan_since', 'all')
self.custom_opt = config.get('bwt_custom_opt')
self.socket_path = config.get('bwt_socket_path', default_socket_path())
self.verbose = config.get('bwt_verbose', 0)
if config.get('bwt_was_oneserver') is None:
config.set_key('bwt_was_oneserver', config.get('oneserver'))
self.start()
def start(self):
if not self.enabled or not self.wallets:
return
self.rpc_port = free_port()
args = [
'--network', get_network_name(),
'--bitcoind-url', self.bitcoind_url,
'--bitcoind-dir', self.bitcoind_dir,
'--electrum-rpc-addr', '127.0.0.1:%d' % self.rpc_port,
]
if self.bitcoind_cred:
args.extend([ '--bitcoind-cred', self.bitcoind_cred ])
if self.bitcoind_wallet:
args.extend([ '--bitcoind-wallet', self.bitcoind_wallet ])
if self.socket_path:
args.extend([ '--unix-listener-path', self.socket_path ])
for wallet in self.wallets:
for xpub in wallet.get_master_public_keys():
args.extend([ '--xpub', '%s:%s' % (xpub, self.rescan_since) ])
for i in range(self.verbose):
args.append('-v')
if self.custom_opt:
# XXX this doesn't support arguments with spaces. thankfully bwt doesn't currently have any.
args.extend(self.custom_opt.split(' '))
self.stop()
_logger.info('Starting bwt daemon')
_logger.debug('bwt options: %s' % ' '.join(args))
if platform.system() == 'Windows':
# hide the console window. can be done with subprocess.CREATE_NO_WINDOW in python 3.7.
suinfo = subprocess.STARTUPINFO()
suinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
else: suinfo = None
self.proc = subprocess.Popen([ bwt_bin ] + args, startupinfo=suinfo, \
stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.DEVNULL)
self.thread = threading.Thread(target=proc_logger, args=(self.proc, self.handle_log), daemon=True)
self.thread.start()
def stop(self):
if self.proc:
_logger.info('Stopping bwt daemon')
self.proc.terminate()
self.proc = None
self.thread = None
def set_server(self):
network = Network.get_instance()
net_params = network.get_parameters()._replace(
host='127.0.0.1',
port=self.rpc_port,
protocol='t',
oneserver=True,
)
network.run_from_another_thread(network.set_parameters(net_params))
@hook
def load_wallet(self, wallet, main_window):
if wallet.get_master_public_keys():
num_wallets = len(self.wallets)
self.wallets |= {wallet}
if len(self.wallets) != num_wallets:
self.start()
else:
_logger.warning('%s wallets are unsupported, skipping' % wallet.wallet_type)
@hook
def close_wallet(self, wallet):
self.wallets -= {wallet}
if not self.wallets:
self.stop()
def close(self):
BasePlugin.close(self)
self.stop()
# restore the user's previous oneserver setting when the plugin is disabled
was_oneserver = self.config.get('bwt_was_oneserver')
if was_oneserver is not None:
self.config.set_key('oneserver', was_oneserver)
self.config.set_key('bwt_was_oneserver', None)
def handle_log(self, level, pkg, msg):
if msg.startswith('Electrum RPC server running'):
self.set_server()
def proc_logger(proc, log_handler):
for line in iter(proc.stdout.readline, b''):
line = line.decode('utf-8').strip()
_logger.debug(line)
if '::' in line and '>' in line:
level, _, line = line.partition(' ')
pkg, _, msg = line.partition('>')
log_handler(level, pkg.strip(), msg.strip())
elif line.lower().startswith('error: '):
log_handler('ERROR', 'bwt', line[7:])
else:
log_handler('INFO', 'bwt', line)
def get_network_name():
if constants.net == constants.BitcoinMainnet:
return 'bitcoin'
elif constants.net == constants.BitcoinTestnet:
return 'testnet'
elif constants.net == constants.BitcoinRegtest:
return 'regtest'
raise UserFacingException(_('Unsupported network {}').format(constants.net))
def default_bitcoind_url():
return 'http://localhost:%d/' % \
{ 'bitcoin': 8332, 'testnet': 18332, 'regtest': 18443 }[get_network_name()]
def default_bitcoind_dir():
if platform.system() == 'Windows':
return os.path.expandvars('%APPDATA%\\Bitcoin')
else:
return os.path.expandvars('$HOME/.bitcoin')
def default_socket_path():
if platform.system() == 'Linux' and os.access(plugin_dir, os.W_OK | os.X_OK):
return os.path.join(plugin_dir, 'bwt-socket')
def free_port():
with socket.socket() as s:
s.bind(('',0))
return s.getsockname()[1]
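# Descriptive note (illustrative): proc_logger() above expects bwt's "level target > message"
# log shape, so a line roughly like
#   INFO bwt::electrum > Electrum RPC server running on 127.0.0.1:50001
# is split into level="INFO", pkg="bwt::electrum" and the message, and handle_log() watches for
# the "Electrum RPC server running" message before pointing Electrum at the local server.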
|
py | 1a3bcb218b4bbe8e5257551c8b8ba674a4150da8 | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper class around TF estimator to perform training."""
import gin
import tensorflow.compat.v1 as tf
from polish.utils import tf_utils
@gin.configurable
class PpoTrainer(object):
"""Wrapper class for PPO TF estimator training.
This class mainly receives any compatible `input_fn` and `model_fn` functions
for a TF estimator and launches estimator training. `input_fn` is a function
that feeds dictionary of arrays into the model. `model_fn` is a function that
defines the network architecture and training operation.
"""
def __init__(self,
input_fn,
model_fn,
num_iterations=156160,
iterations_per_loop=320,
checkpoint_dir=gin.REQUIRED,
keep_checkpoint_max=20,
use_tpu=False):
"""Creates a PPO training class.
Args:
input_fn: The function to feed in input data during training.
model_fn: The model to train on.
num_iterations: The number of iterations to run the training for.
iterations_per_loop: Number of steps to run on TPU before outfeeding
metrics to the CPU. If the number of iterations in the loop would exceed
the number of train steps, the loop will exit before reaching
--iterations_per_loop. The larger this value is, the higher the
utilization on the TPU.
checkpoint_dir: The directory to save checkpoints to.
keep_checkpoint_max: The maximum number of checkpoints to keep.
use_tpu: If True, use TPU for model training.
"""
self._input_fn = input_fn
self._model_fn = model_fn
self._num_iterations = num_iterations
self._iterations_per_loop = iterations_per_loop
self._checkpoint_dir = checkpoint_dir
self._keep_checkpoint_max = keep_checkpoint_max
self._use_tpu = use_tpu
def get_estimator(self):
"""Obtain estimator for the working directory.
Returns:
an (TPU/non-TPU) estimator.
"""
if self._use_tpu:
return tf_utils.get_tpu_estimator(self._checkpoint_dir, self._model_fn)
run_config = tf.estimator.RunConfig(
save_summary_steps=self._iterations_per_loop,
save_checkpoints_steps=self._iterations_per_loop,
keep_checkpoint_max=self._keep_checkpoint_max)
return tf.estimator.Estimator(
self._model_fn, model_dir=self._checkpoint_dir, config=run_config)
def train(self):
"""A wrapper to launch training on the estimator."""
estimator = self.get_estimator()
hooks = [self._input_fn]
estimator.train(
input_fn=self._input_fn, hooks=hooks, max_steps=self._num_iterations)
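# --- Illustrative usage sketch (an assumption, not part of the module) ---
# `my_input_fn` and `my_model_fn` are hypothetical estimator-compatible callables; in the real
# pipeline these, and checkpoint_dir, are normally wired up through gin bindings.
def _example_train(my_input_fn, my_model_fn):
  """Sketch: construct the trainer and run the estimator training loop."""
  trainer = PpoTrainer(
      input_fn=my_input_fn,
      model_fn=my_model_fn,
      checkpoint_dir='/tmp/ppo_checkpoints',
      use_tpu=False)
  trainer.train()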
|
py | 1a3bcb3a22a0dd90b774823a1c17d1600d9021d6 | from chill import *
source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/fdtd-2d/tmp_files/6762.c')
procedure('kernel_fdtd_2d')
loop(0)
known(' nx > 1 ')
known(' ny > 1 ')
tile(1,2,16,2)
tile(1,4,16,4)
tile(2,2,16,2)
tile(2,4,16,4)
tile(3,2,16,2)
tile(3,4,16,4)
|
py | 1a3bce54903aafaa696315dcc2bdce756bacefde |
import streamlit as st
import urllib3
import numpy as np
from PIL import Image
import cv2
import requests
import socket
#================================
# Message Headers
#=================================
COMMAND_START=bytes("<command>",'utf-8')
COMMAND_END=bytes("</command>","utf-8")
IMAGE_START=bytes("<image>","utf-8")
IMAGE_END=bytes("</image>","utf-8")
#================================
# Web App Init Elements
#=================================
st.title("ONVIF CCTV Connect")
st.write("(C) Faizansoft International 2000-2021")
st.write("\r\n")
st.write("Note: This demo will only work with ONVIF compatible IP cameras that have the live-jpeg API.")
st.write("The reason for live jpeg being chosen over rtsp/rtmp is due to reliability on low resourced cameras.")
#=================================
# Set up Yolo V4
#=================================
class YoloV4Model:
def __init__(self,yolocfg,yoloweights,coconames):
self.CONFIDENCE_THRESHOLD = 0.2
self.NMS_THRESHOLD=0.4
#Set up neural network and configure Backend and Target
dnn_net=cv2.dnn.readNetFromDarknet(yolocfg, yoloweights)
dnn_net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
dnn_net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
#Set up the DNN Model
dnn_model=cv2.dnn_DetectionModel(dnn_net)
dnn_model.setInputParams(size=(416, 416), scale=1/255, swapRB=True)
self._dnn_model=dnn_model
#Setup the coco.names list
COCO_NAMES_LIST=[]
with open("coco.names","r") as coco_names:
COCO_NAMES_LIST=coco_names.readlines()
self._COCO_NAMES_LIST=COCO_NAMES_LIST
def DetectObjects_retFrameDetList(self,frame):
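# detect() returns parallel arrays of class ids, confidence scores and
# [x, y, w, h] bounding boxes for every detection above the thresholds.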
_classes,_scores,_boxes=self._dnn_model.detect(frame,self.CONFIDENCE_THRESHOLD,self.NMS_THRESHOLD)
#Text List for detections
DET_LIST=[]
for (_class,_score,_box) in zip(_classes,_scores,_boxes):
_class=_class.tolist()
_score=_score.tolist()
_box=_box.tolist()
cv2.rectangle(frame,_box, (0,255,0), 2)
cv2.putText(frame, self._COCO_NAMES_LIST[_class[0]], (_box[0], _box[1] - 10), cv2.FONT_HERSHEY_SIMPLEX,5,(0,255,255), 2)
DET_LIST.extend("Detected {} @ {},{}.".format(self._COCO_NAMES_LIST[_class[0]],_box[0],_box[1]))
return (frame,DET_LIST)
_yoloV4=YoloV4Model("yolov4.cfg","yolov4.weights","coco.names")
#================================
# Getting Camera Variables
#=================================
# Here we try to get the variables for the user's camera.
# This includes ip_addr,uname and password
with st.form(key="ip_cctv_connect"):
st.write("Please enter the credentials for your ONVIF Capable IP Camera.")
ip_address=st.text_input("Enter your camera's IP Address:")
username=st.text_input("Enter your Camera's Username:")
password=st.text_input("Enter your camera's password:")
command=st.text_input("Enter the image processing command: ")
cmd_connect=st.form_submit_button(label="Connect!")
#=====================================
# Disconnect Button
#==========================================
cmd_disconnect=st.button("Disconnect!")
#===============================
# URLLIB 3 HTTP OBject
#===============================
http=urllib3.PoolManager()
#===============================
# Streamlit Placeholders
#===============================
#Create the Place Holders
img_ph_1=st.image([])
img_ph_2=st.image([])
def grab_frame_cctv():
#http://admin:[email protected]/tmpfs/auto.jpg
_url="http://{0}:{1}@{2}/tmpfs/auto.jpg".format(username,password,ip_address)
img=Image.open(requests.get(_url,stream=True).raw)
cvFrame=np.array(img)
return cvFrame
if cmd_connect:
while True:
frame=grab_frame_cctv()
img_ph_1.image(frame)
img_ph_2.image(_yoloV4.DetectObjects_retFrameDetList(frame)[0])
if cmd_disconnect:
break
|
py | 1a3bceaa6f7db30f722df47529e9efab2e6acf44 | from sqlalchemy import orm
import datetime
from sqlalchemy import schema, types
metadata = schema.MetaData()
def now():
return datetime.datetime.now()
mac_table = schema.Table('nac_mactable', metadata,
schema.Column('id', types.Integer, primary_key=True),
schema.Column('mac', types.String(32)),
schema.Column('ip', types.String(64)),
schema.Column('state', types.String(64)),
schema.Column('user_id', types.String(32)),
schema.Column('dpid', types.String(32)),
schema.Column('port', types.Integer),
)
switch = schema.Table('switch', metadata,
schema.Column('id', types.Integer, primary_key=True),
schema.Column('dpid', types.String(32)),
schema.Column('name', types.String(128)),
schema.Column('os', types.String(128)),
)
link = schema.Table('link', metadata,
schema.Column('id', types.Integer, primary_key=True),
schema.Column('a_dpid', types.String(32)),
schema.Column('z_dpid', types.String(32)),
schema.Column('a_port', types.Integer),
schema.Column('z_port', types.Integer),
)
class NAC_MacTable(object):
pass
class Switch(object):
pass
class Link(object):
pass
orm.mapper(NAC_MacTable, mac_table)
orm.mapper(Switch, switch)
orm.mapper(Link, link)
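# --- Hedged usage sketch (illustrative addition, not part of the original module) ---
# With the classical mappings above, rows can be inserted and queried through a Session.
# This assumes a pre-2.0 SQLAlchemy (matching the classical orm.mapper() calls above) and
# uses an in-memory SQLite engine purely for demonstration.
from sqlalchemy import create_engine
example_engine = create_engine('sqlite:///:memory:')
metadata.create_all(example_engine)  # create the three tables defined above
ExampleSession = orm.sessionmaker(bind=example_engine)
example_session = ExampleSession()
example_switch = Switch()
example_switch.dpid = '00:00:00:00:00:00:00:01'  # hypothetical values for illustration
example_switch.name = 'edge-switch-1'
example_switch.os = 'openflow'
example_session.add(example_switch)
example_session.commit()
assert example_session.query(Switch).count() == 1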
|
py | 1a3bcebe8dc693a1b508e8af7161a7b1be854b79 | #!/usr/bin/python3
#
# Copyright (c) Siemens AG, 2020
# [email protected]
#
# SPDX-License-Identifier: MIT
#
#
# NOTE this was tested on Python 3.6.9
# NOTE subprocess seems to return empty stdout when ASan reports an error
import sys
import os
import os.path
import signal
import subprocess
import uuid
from pprint import pprint as pp
import util
import results
import config
def check():
"""Check security issues according to config and pass results to next tools."""
overall_report = dict()
# source code analysis
# ====================
# currently empty
# compile
# =======
ret_makefile = subprocess.run([config.compiler] + config.compiler_args, # command
stdout=subprocess.PIPE, # capture stdout
stderr=subprocess.PIPE, # capture stderr
universal_newlines=True) # use text mode for std* file objects
overall_report['makefile'] = ret_makefile
# runtime analysis
# ================
with open('compile.txt', 'r') as f:
if 'error' not in f.read().lower(): # if compilation succeeded
overall_report, test_case_report_list = runtime_analysis(config, overall_report)
# pass this info to next tools for subsequent processing
# ======================================================
pp(overall_report)
# results from runtime analysis
if 'runtime_analysis_done' in overall_report:
success_count = 0
for report in test_case_report_list:
if 'timeout' in report:
util.addFinding("Time limit exceeded!", 0, "", "TEST_080006")
elif report['return_code'] != 0:
if report['stderr_stream'] != '': # ASan/LeakSan/Stack protector probably reported something
pass # but these findings will be added by analyze.py
else:
util.addFinding("It seems your program might have crashed.", 0,"","TEST_100006")
# output_match == None means the user might have tried to print to outfile
elif report['stdout_stream'] != '' or report['output_match'] is None:
util.addFinding("A test case failed! Make sure you are not trying to print something.",
0,"","TEST_100006")
elif not all(report['output_match']): # not all test cases passed
util.addFinding("A test case failed!", 0, "", "TEST_100006")
else:
success_count += 1
with open('stderr.txt', 'a') as f:
f.write(report['stderr_stream'])
with open('stdout.txt', 'a') as f:
f.write(report['outfile'])
if success_count == len(test_case_report_list):
util.addFinding("Program behaves as expected!", 1, "CHALLENGE_PASS", "TEST_900006")
util.dumpFindings()
# next tools
subprocess.run(["./analyse.py"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
subprocess.run(["./ai.py"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def runtime_analysis(config, overall_report):
"""Run test suites on executable and return a list containing the result of each test suite.
Each list item is a dictionary describing the result of running that test suite.
"""
test_case_report_list = []
for test_suite in config.get_test_suite():
report = dict()
report['stdout_stream'] = ''
report['stderr_stream'] = ''
report['outfile'] = ''
input_for_stdin = config.get_test_suite_input_for_stdin(test_suite)
# using Popen instead of run because I need access to the pid
# See comment under "except subprocess.TimeoutExpired:"
infile = "xinfile_" + uuid.uuid4().hex[0:16] + ".txt"
outfile = "xoutfile_" + uuid.uuid4().hex[0:16] + ".txt"
p = subprocess.Popen(['./run_jail.sh',
config.output_filename,
str(len(test_suite)), infile, outfile], # command
stdout=subprocess.PIPE, # capture stdout
stderr=subprocess.PIPE, # capture stderr
stdin=subprocess.PIPE, # capture stdin
universal_newlines=True, # use text mode for std* file objects
start_new_session=True, # otherwise killing the process group will also kill the Python interpreter
)
try:
# send test suite input
with open(infile, "w") as f:
f.write(input_for_stdin)
(stdout_stream, stderr_stream) = p.communicate(timeout=config.timeout)
report['return_code'] = p.returncode
report['stderr_stream'] += stderr_stream
report['stdout_stream'] += stdout_stream
with open(outfile, "r") as f:
current_outfile = f.read()
report['outfile'] += current_outfile
# check if test cases passed
ret_output_match = config.check_for_output_match(current_outfile, test_suite)
report['test_suite'] = test_suite
report['output_match'] = ret_output_match
except subprocess.TimeoutExpired:
# kill the process group so that all child processes spawned by the process are also killed
# The children need to be killed because, in addition to wasting CPU cycles,
# they can hold stdout open, making Python wait indefinitely even after the timeout has expired
os.killpg(os.getpgid(p.pid), signal.SIGKILL)
report['timeout'] = True
finally:
test_case_report_list.append(report)
overall_report['runtime_analysis_done'] = True
return overall_report, test_case_report_list
if __name__ == '__main__':
try:
check() # run checker
except Exception as e:
print("EXCEPTION IN CHECKER: " + str(e))
util.dumpFindings()
|
py | 1a3bcee239be4311df0854931dea6f7515fa46e6 | from uuid import uuid4
class Init():
# web address
_initPath = "http://127.0.0.1"
# TCPServerApi address
_commandPath = "http://127.0.0.1:9000"
# TCPServer address
_host = '192.168.219.102'
_port = 9009
@classmethod
def getInitPath(cls):
return cls._initPath
@classmethod
def getCommandPath(cls):
return cls._commandPath
@classmethod
def getHost(cls):
return cls._host
@classmethod
def getPort(cls):
return cls._port
|
py | 1a3bcf08e0604c915329e867a385fc96509388ae | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.conf import settings
from django.test import TestCase, override_settings
from unittest import skip
from zerver.lib.avatar import avatar_url
from zerver.lib.bugdown import url_filename
from zerver.lib.test_helpers import AuthedTestCase
from zerver.lib.test_runner import slow
from zerver.lib.upload import sanitize_name, S3UploadBackend, \
upload_message_image, delete_message_image, LocalUploadBackend
import zerver.lib.upload
from zerver.models import Attachment, Recipient, get_user_profile_by_email, \
get_old_unclaimed_attachments, Message, UserProfile
from zerver.lib.actions import do_delete_old_unclaimed_attachments
import ujson
from six.moves import urllib
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from six.moves import StringIO
import os
import shutil
import re
import datetime
import requests
import base64
from datetime import timedelta
from django.utils import timezone
from moto import mock_s3
TEST_AVATAR_DIR = os.path.join(os.path.dirname(__file__), 'images')
def destroy_uploads():
# type: () -> None
if os.path.exists(settings.LOCAL_UPLOADS_DIR):
shutil.rmtree(settings.LOCAL_UPLOADS_DIR)
class FileUploadTest(AuthedTestCase):
def test_rest_endpoint(self):
# type: () -> None
"""
Tests the /api/v1/user_uploads api endpoint. Here a single file is uploaded
and downloaded using a username and api_key
"""
fp = StringIO("zulip!")
fp.name = "zulip.txt"
# Upload file via API
auth_headers = self.api_auth('[email protected]')
result = self.client.post('/api/v1/user_uploads', {'file': fp}, **auth_headers)
json = ujson.loads(result.content)
self.assertIn("uri", json)
uri = json["uri"]
base = '/user_uploads/'
self.assertEquals(base, uri[:len(base)])
# Download file via API
self.client.post('/accounts/logout/')
response = self.client.get(uri, **auth_headers)
data = b"".join(response.streaming_content)
self.assertEquals(b"zulip!", data)
# Files uploaded through the API should be accessible via the web client
self.login("[email protected]")
response = self.client.get(uri)
data = b"".join(response.streaming_content)
self.assertEquals(b"zulip!", data)
def test_multiple_upload_failure(self):
# type: () -> None
"""
Attempting to upload two files should fail.
"""
self.login("[email protected]")
fp = StringIO("bah!")
fp.name = "a.txt"
fp2 = StringIO("pshaw!")
fp2.name = "b.txt"
result = self.client.post("/json/upload_file", {'f1': fp, 'f2': fp2})
self.assert_json_error(result, "You may only upload one file at a time")
def test_no_file_upload_failure(self):
# type: () -> None
"""
Calling this endpoint with no files should fail.
"""
self.login("[email protected]")
result = self.client.post("/json/upload_file")
self.assert_json_error(result, "You must specify a file to upload")
# This test will go through the code path for uploading files onto LOCAL storage
# when zulip is in DEVELOPMENT mode.
def test_file_upload_authed(self):
# type: () -> None
"""
A call to /json/upload_file should return a uri and actually create an
entry in the database. This entry will be marked unclaimed until a message
refers to it.
"""
self.login("[email protected]")
fp = StringIO("zulip!")
fp.name = "zulip.txt"
result = self.client.post("/json/upload_file", {'file': fp})
self.assert_json_success(result)
json = ujson.loads(result.content)
self.assertIn("uri", json)
uri = json["uri"]
base = '/user_uploads/'
self.assertEquals(base, uri[:len(base)])
# In the future, local file requests will follow the same style as S3
# requests; they will be first authenthicated and redirected
response = self.client.get(uri)
data = b"".join(response.streaming_content)
self.assertEquals(b"zulip!", data)
# check if DB has attachment marked as unclaimed
entry = Attachment.objects.get(file_name='zulip.txt')
self.assertEquals(entry.is_claimed(), False)
self.subscribe_to_stream("[email protected]", "Denmark")
body = "First message ...[zulip.txt](http://localhost:9991" + uri + ")"
self.send_message("[email protected]", "Denmark", Recipient.STREAM, body, "test")
self.assertIn('title="zulip.txt"', self.get_last_message().rendered_content)
def test_delete_old_unclaimed_attachments(self):
# type: () -> None
# Upload some files and make them older than a week
self.login("[email protected]")
d1 = StringIO("zulip!")
d1.name = "dummy_1.txt"
result = self.client.post("/json/upload_file", {'file': d1})
json = ujson.loads(result.content)
uri = json["uri"]
d1_path_id = re.sub('/user_uploads/', '', uri)
d2 = StringIO("zulip!")
d2.name = "dummy_2.txt"
result = self.client.post("/json/upload_file", {'file': d2})
json = ujson.loads(result.content)
uri = json["uri"]
d2_path_id = re.sub('/user_uploads/', '', uri)
two_week_ago = timezone.now() - datetime.timedelta(weeks=2)
d1_attachment = Attachment.objects.get(path_id = d1_path_id)
d1_attachment.create_time = two_week_ago
d1_attachment.save()
d2_attachment = Attachment.objects.get(path_id = d2_path_id)
d2_attachment.create_time = two_week_ago
d2_attachment.save()
# Send a message referring only to dummy_1
self.subscribe_to_stream("[email protected]", "Denmark")
body = "Some files here ...[zulip.txt](http://localhost:9991/user_uploads/" + d1_path_id + ")"
self.send_message("[email protected]", "Denmark", Recipient.STREAM, body, "test")
# dummy_2 should not exist in database or the uploads folder
do_delete_old_unclaimed_attachments(2)
self.assertTrue(not Attachment.objects.filter(path_id = d2_path_id).exists())
self.assertTrue(not delete_message_image(d2_path_id))
def test_multiple_claim_attachments(self):
# type: () -> None
"""
This test tries to claim the same attachment twice. The messages field in
the Attachment model should contain both messages.
"""
self.login("[email protected]")
d1 = StringIO("zulip!")
d1.name = "dummy_1.txt"
result = self.client.post("/json/upload_file", {'file': d1})
json = ujson.loads(result.content)
uri = json["uri"]
d1_path_id = re.sub('/user_uploads/', '', uri)
self.subscribe_to_stream("[email protected]", "Denmark")
body = "First message ...[zulip.txt](http://localhost:9991/user_uploads/" + d1_path_id + ")"
self.send_message("[email protected]", "Denmark", Recipient.STREAM, body, "test")
body = "Second message ...[zulip.txt](http://localhost:9991/user_uploads/" + d1_path_id + ")"
self.send_message("[email protected]", "Denmark", Recipient.STREAM, body, "test")
self.assertEquals(Attachment.objects.get(path_id=d1_path_id).messages.count(), 2)
def test_check_attachment_reference_update(self):
f1 = StringIO("file1")
f1.name = "file1.txt"
f2 = StringIO("file2")
f2.name = "file2.txt"
f3 = StringIO("file3")
f3.name = "file3.txt"
self.login("[email protected]")
result = self.client.post("/json/upload_file", {'file': f1})
json = ujson.loads(result.content)
uri = json["uri"]
f1_path_id = re.sub('/user_uploads/', '', uri)
result = self.client.post("/json/upload_file", {'file': f2})
json = ujson.loads(result.content)
uri = json["uri"]
f2_path_id = re.sub('/user_uploads/', '', uri)
self.subscribe_to_stream("[email protected]", "test")
body = ("[f1.txt](http://localhost:9991/user_uploads/" + f1_path_id + ")"
"[f2.txt](http://localhost:9991/user_uploads/" + f2_path_id + ")")
msg_id = self.send_message("[email protected]", "test", Recipient.STREAM, body, "test")
result = self.client.post("/json/upload_file", {'file': f3})
json = ujson.loads(result.content)
uri = json["uri"]
f3_path_id = re.sub('/user_uploads/', '', uri)
new_body = ("[f3.txt](http://localhost:9991/user_uploads/" + f3_path_id + ")"
"[f2.txt](http://localhost:9991/user_uploads/" + f2_path_id + ")")
result = self.client.post("/json/update_message", {
'message_id': msg_id,
'content': new_body
})
self.assert_json_success(result)
message = Message.objects.get(id=msg_id)
f1_attachment = Attachment.objects.get(path_id=f1_path_id)
f2_attachment = Attachment.objects.get(path_id=f2_path_id)
f3_attachment = Attachment.objects.get(path_id=f3_path_id)
self.assertTrue(message not in f1_attachment.messages.all())
self.assertTrue(message in f2_attachment.messages.all())
self.assertTrue(message in f3_attachment.messages.all())
def tearDown(self):
# type: () -> None
destroy_uploads()
class AvatarTest(AuthedTestCase):
def test_multiple_upload_failure(self):
# type: () -> None
"""
Attempting to upload two files should fail.
"""
self.login("[email protected]")
fp1 = open(os.path.join(TEST_AVATAR_DIR, 'img.png'), 'rb')
fp2 = open(os.path.join(TEST_AVATAR_DIR, 'img.png'), 'rb')
result = self.client.post("/json/set_avatar", {'f1': fp1, 'f2': fp2})
self.assert_json_error(result, "You must upload exactly one avatar.")
def test_no_file_upload_failure(self):
# type: () -> None
"""
Calling this endpoint with no files should fail.
"""
self.login("[email protected]")
result = self.client.post("/json/set_avatar")
self.assert_json_error(result, "You must upload exactly one avatar.")
correct_files = [
('img.png', 'png_resized.png'),
('img.gif', 'gif_resized.png'),
('img.tif', 'tif_resized.png')
]
corrupt_files = ['text.txt', 'corrupt.png', 'corrupt.gif']
def test_get_gravatar_avatar(self):
# type: () -> None
self.login("[email protected]")
cordelia = get_user_profile_by_email('[email protected]')
cordelia.avatar_source = UserProfile.AVATAR_FROM_GRAVATAR
cordelia.save()
with self.settings(ENABLE_GRAVATAR=True):
response = self.client.get("/avatar/[email protected]?foo=bar")
redirect_url = response['Location']
self.assertEqual(redirect_url, avatar_url(cordelia) + '&foo=bar')
with self.settings(ENABLE_GRAVATAR=False):
response = self.client.get("/avatar/[email protected]?foo=bar")
redirect_url = response['Location']
self.assertTrue(redirect_url.endswith(avatar_url(cordelia) + '&foo=bar'))
def test_get_user_avatar(self):
# type: () -> None
self.login("[email protected]")
cordelia = get_user_profile_by_email('[email protected]')
cordelia.avatar_source = UserProfile.AVATAR_FROM_USER
cordelia.save()
response = self.client.get("/avatar/[email protected]?foo=bar")
redirect_url = response['Location']
self.assertTrue(redirect_url.endswith(avatar_url(cordelia) + '&foo=bar'))
def test_get_system_generated_avatar(self):
# type: () -> None
self.login("[email protected]")
cordelia = get_user_profile_by_email('[email protected]')
cordelia.avatar_source = UserProfile.AVATAR_FROM_SYSTEM
cordelia.save()
response = self.client.get("/avatar/[email protected]?foo=bar")
redirect_url = response['Location']
self.assertTrue(redirect_url.endswith(avatar_url(cordelia) + '&foo=bar'))
def test_non_valid_user_avatar(self):
# type: () -> None
# It's debatable whether we should generate avatars for non-users,
# but this test just validates the current code's behavior.
self.login("[email protected]")
response = self.client.get("/avatar/[email protected]?foo=bar")
redirect_url = response['Location']
actual_url = 'https://secure.gravatar.com/avatar/444258b521f152129eb0c162996e572d?d=identicon&foo=bar'
self.assertEqual(redirect_url, actual_url)
def test_valid_avatars(self):
# type: () -> None
"""
A call to /json/set_avatar with a valid file should return a url and actually create an avatar.
"""
for fname, rfname in self.correct_files:
# TODO: use self.subTest once we're exclusively on python 3 by uncommenting the line below.
# with self.subTest(fname=fname):
self.login("[email protected]")
fp = open(os.path.join(TEST_AVATAR_DIR, fname), 'rb')
result = self.client.post("/json/set_avatar", {'file': fp})
self.assert_json_success(result)
json = ujson.loads(result.content)
self.assertIn("avatar_url", json)
url = json["avatar_url"]
base = '/user_avatars/'
self.assertEquals(base, url[:len(base)])
rfp = open(os.path.join(TEST_AVATAR_DIR, rfname), 'rb')
response = self.client.get(url)
data = b"".join(response.streaming_content)
self.assertEquals(rfp.read(), data)
def test_invalid_avatars(self):
# type: () -> None
"""
A call to /json/set_avatar with an invalid file should fail.
"""
for fname in self.corrupt_files:
# with self.subTest(fname=fname):
self.login("[email protected]")
fp = open(os.path.join(TEST_AVATAR_DIR, fname), 'rb')
result = self.client.post("/json/set_avatar", {'file': fp})
self.assert_json_error(result, "Could not decode avatar image; did you upload an image file?")
def tearDown(self):
# type: () -> None
destroy_uploads()
class LocalStorageTest(AuthedTestCase):
def test_file_upload_local(self):
# type: () -> None
sender_email = "[email protected]"
user_profile = get_user_profile_by_email(sender_email)
uri = upload_message_image(u'dummy.txt', u'text/plain', b'zulip!', user_profile)
base = '/user_uploads/'
self.assertEquals(base, uri[:len(base)])
path_id = re.sub('/user_uploads/', '', uri)
file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, 'files', path_id)
self.assertTrue(os.path.isfile(file_path))
def test_delete_message_image_local(self):
# type: () -> None
self.login("[email protected]")
fp = StringIO("zulip!")
fp.name = "zulip.txt"
result = self.client.post("/json/upload_file", {'file': fp})
json = ujson.loads(result.content)
uri = json["uri"]
path_id = re.sub('/user_uploads/', '', uri)
self.assertTrue(delete_message_image(path_id))
def tearDown(self):
# type: () -> None
destroy_uploads()
def use_s3_backend(method):
@mock_s3
@override_settings(LOCAL_UPLOADS_DIR=None)
def new_method(*args, **kwargs):
zerver.lib.upload.upload_backend = S3UploadBackend()
try:
return method(*args, **kwargs)
finally:
zerver.lib.upload.upload_backend = LocalUploadBackend()
return new_method
class S3Test(AuthedTestCase):
@use_s3_backend
def test_file_upload_s3(self):
# type: () -> None
conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY)
bucket = conn.create_bucket(settings.S3_AUTH_UPLOADS_BUCKET)
sender_email = "[email protected]"
user_profile = get_user_profile_by_email(sender_email)
uri = upload_message_image(u'dummy.txt', u'text/plain', b'zulip!', user_profile)
base = '/user_uploads/'
self.assertEquals(base, uri[:len(base)])
path_id = re.sub('/user_uploads/', '', uri)
self.assertEquals(b"zulip!", bucket.get_key(path_id).get_contents_as_string())
self.subscribe_to_stream("[email protected]", "Denmark")
body = "First message ...[zulip.txt](http://localhost:9991" + uri + ")"
self.send_message("[email protected]", "Denmark", Recipient.STREAM, body, "test")
self.assertIn('title="dummy.txt"', self.get_last_message().rendered_content)
@use_s3_backend
def test_message_image_delete_s3(self):
# type: () -> None
conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY)
conn.create_bucket(settings.S3_AUTH_UPLOADS_BUCKET)
sender_email = "[email protected]"
user_profile = get_user_profile_by_email(sender_email)
uri = upload_message_image(u'dummy.txt', u'text/plain', b'zulip!', user_profile)
path_id = re.sub('/user_uploads/', '', uri)
self.assertTrue(delete_message_image(path_id))
@use_s3_backend
def test_file_upload_authed(self):
# type: () -> None
"""
A call to /json/upload_file should return a uri and actually create an object.
"""
conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY)
conn.create_bucket(settings.S3_AUTH_UPLOADS_BUCKET)
self.login("[email protected]")
fp = StringIO("zulip!")
fp.name = "zulip.txt"
result = self.client.post("/json/upload_file", {'file': fp})
self.assert_json_success(result)
json = ujson.loads(result.content)
self.assertIn("uri", json)
uri = json["uri"]
base = '/user_uploads/'
self.assertEquals(base, uri[:len(base)])
response = self.client.get(uri)
redirect_url = response['Location']
self.assertEquals(b"zulip!", urllib.request.urlopen(redirect_url).read().strip())
self.subscribe_to_stream("[email protected]", "Denmark")
body = "First message ...[zulip.txt](http://localhost:9991" + uri + ")"
self.send_message("[email protected]", "Denmark", Recipient.STREAM, body, "test")
self.assertIn('title="zulip.txt"', self.get_last_message().rendered_content)
class UploadTitleTests(TestCase):
def test_upload_titles(self):
# type: () -> None
self.assertEqual(url_filename("http://localhost:9991/user_uploads/1/LUeQZUG5jxkagzVzp1Ox_amr/dummy.txt"), "dummy.txt")
self.assertEqual(url_filename("http://localhost:9991/user_uploads/1/94/SzGYe0RFT-tEcOhQ6n-ZblFZ/zulip.txt"), "zulip.txt")
self.assertEqual(url_filename("https://zulip.com/user_uploads/4142/LUeQZUG5jxkagzVzp1Ox_amr/pasted_image.png"), "pasted_image.png")
self.assertEqual(url_filename("https://zulip.com/integrations"), "https://zulip.com/integrations")
self.assertEqual(url_filename("https://example.com"), "https://example.com")
class SanitizeNameTests(TestCase):
def test_file_name(self):
# type: () -> None
self.assertEquals(sanitize_name(u'test.txt'), u'test.txt')
self.assertEquals(sanitize_name(u'.hidden'), u'.hidden')
self.assertEquals(sanitize_name(u'.hidden.txt'), u'.hidden.txt')
self.assertEquals(sanitize_name(u'tarball.tar.gz'), u'tarball.tar.gz')
self.assertEquals(sanitize_name(u'.hidden_tarball.tar.gz'), u'.hidden_tarball.tar.gz')
self.assertEquals(sanitize_name(u'Testing{}*&*#().ta&&%$##&&r.gz'), u'Testing.tar.gz')
self.assertEquals(sanitize_name(u'*testingfile?*.txt'), u'testingfile.txt')
self.assertEquals(sanitize_name(u'snowman☃.txt'), u'snowman.txt')
self.assertEquals(sanitize_name(u'테스트.txt'), u'테스트.txt')
self.assertEquals(sanitize_name(u'~/."\`\?*"u0`000ssh/test.t**{}ar.gz'), u'.u0000sshtest.tar.gz')
|
py | 1a3bd0f0cc1bb9efc460feb4e62b0f0a1a2d6620 | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'merra_weather.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
py | 1a3bd1bc3261f356431a282cd4e186e943cfa436 | import numpy as np
import cv2
import math
import argparse
import time
def calculate_area(contours):
""" Calculate contour area
Paramters:
contours: List[numpy.ndarray]
Returns:
List[numpy.ndarray]: contours_area
"""
contours_area = []
# calculate area and filter into new array
for con in contours:
area = cv2.contourArea(con)
if 10000 < area < 60000:
contours_area.append(con)
return contours_area
def check_circularity(con):
""" Check circularity of contours and
calculate center coords and radius of resulting circle
Paramters:
con: numpy.ndarray
Returns:
float: circularity
int: cX
int: cY
int: r
"""
perimeter = cv2.arcLength(con, True)
area = cv2.contourArea(con)
if perimeter == 0:
return 0, 0, 0, 0
circularity = 4*math.pi*(area/(perimeter*perimeter))
M = cv2.moments(con)
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
r = int(math.sqrt(area/(math.pi)))
return circularity, cX, cY, r
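# --- Hedged sanity check (illustrative addition, not part of the original script) ---
# For an ideal filled disc the isoperimetric ratio 4*pi*A/P^2 approaches 1, which is why
# detect_contour() below keeps contours whose circularity lies between 0.7 and 1.1.
# The OpenCV 3.x findContours return signature is assumed, matching the rest of this file.
_sanity_canvas = np.zeros((400, 400), dtype=np.uint8)
cv2.circle(_sanity_canvas, (200, 200), 100, 255, -1)  # draw a filled white disc
_, _sanity_contours, _ = cv2.findContours(_sanity_canvas, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
_sanity_circularity, _, _, _ = check_circularity(_sanity_contours[0])
# _sanity_circularity is expected to be close to 1.0 (slightly below, due to rasterisation)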
def detect_contour(gray, img):
""" Perform Gaussian Blur to smoothen image, binary threshold image to extract features
Detects contours on filtered image
Paramters:
gray: numpy.ndarray
img: numpy.ndarray
Returns:
img: numpy.ndarray
"""
filter_img = cv2.GaussianBlur(gray, (11, 11), 11)
#filter_img = cv2.bilateralFilter(img, 7, 50, 50)
_, thresh = cv2.threshold(filter_img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
_, contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours_area = calculate_area(contours)
contours_circles = []
# check if contour is of circular shape
for con in contours_area:
circularity, cX, cY, r = check_circularity(con)
if 0.7 < circularity < 1.1:
contours_circles.append(con)
cv2.circle(img, (cX, cY), 3, (0,255,0), 3)
else:
cv2.circle(img, (cX, cY), r, (0,255,0), 3)
cv2.circle(img, (cX, cY), 3, (0,255,0), 3)
cv2.drawContours(img, contours_circles, -1, (0, 255, 0), 3)
return img, filter_img, thresh
def extract_roi(img):
""" Extract region of interest in frame to perform image processing pipelines
Parameters:
img: numpy.ndarray
Returns:
numpy.ndarray: eye_ROI
"""
polygons = np.array([(300, 900), (300, 200), (1050, 200), (1050, 900)])
mask = np.zeros_like(img)
cv2.fillConvexPoly(mask, polygons, 255)
eye_ROI = cv2.bitwise_and(img, mask)
return eye_ROI
def fps_overlay(img, fps):
""" Overlay FPS onto output img
Parameters:
img: numpy.ndarray
fps: float
Returns:
numpy.ndarray: img
"""
text = "FPS: {:.2f}".format(fps)
return cv2.putText(img, text, (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
def image_reader(filename, image_layers):
""" Image reader and performs image processing pipeline on
image file
Paramters:
filename: str
"""
img = cv2.imread(filename)
img_cp = img.copy()
gray = cv2.cvtColor(img_cp, cv2.COLOR_BGR2GRAY)
gray = extract_roi(gray)
img_cp, filter_img, thresh = detect_contour(gray, img_cp)
if image_layers:
cv2.namedWindow("ROI")
cv2.namedWindow("Gaussian Blur")
cv2.namedWindow("Thresholding")
cv2.namedWindow("Output")
cv2.imshow("ROI", gray)
cv2.imshow("Gaussian Blur", filter_img)
cv2.imshow("Thresholding", thresh)
cv2.imshow("Output", img_cp)
else:
cv2.imshow("Output", img_cp)
key = cv2.waitKey(0)
if key == ord('q'):
cv2.destroyAllWindows()
def video_reader(filename, image_layers):
""" Video capture reader and performs image processing pipeline on
captured frames
Paramters:
filename: str
"""
cap = cv2.VideoCapture(filename)
while(True):
ret, img = cap.read()
tic = time.time()
if not ret:
break
img_cp = img.copy()
gray = cv2.cvtColor(img_cp, cv2.COLOR_BGR2GRAY)
gray = extract_roi(gray)
img_cp, filter_img, thresh = detect_contour(gray, img_cp)
toc = time.time()
fps = 1/(toc-tic)
img_cp = fps_overlay(img_cp, fps)
if image_layers:
cv2.namedWindow("ROI")
cv2.namedWindow("Gaussian Blur")
cv2.namedWindow("Thresholding")
cv2.namedWindow("Output")
cv2.imshow("ROI", gray)
cv2.imshow("Gaussian Blur", filter_img)
cv2.imshow("Thresholding", thresh)
cv2.imshow("Output", img_cp)
else:
cv2.imshow("Output", img_cp)
key = cv2.waitKey(1)
if key == ord('q'):
break
if key == ord('p'):
cv2.waitKey(-1)
cap.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--input_format',
type=int,
dest='input_format',
default=1,
help='Image(0) or Video(1)')
parser.add_argument('--input_file',
type=str,
dest='input_file',
default='/home/indra/Documents/Telemedc/pupil_detector/assets/sample.mkv',
help='Path to input file (image or video)')
parser.add_argument('--image_layers',
action='store_true',  # type=bool would coerce any non-empty string (even "False") to True
dest='image_layers',
default=False,
help='Open CV Windows to see intermediate processing')
args = parser.parse_args()
if args.input_format:
video_reader(args.input_file, args.image_layers)
else:
image_reader(args.input_file, args.image_layers) |
py | 1a3bd229da8912fd24affb4f0f6bd7ba476e6366 | import matplotlib.pyplot as plt
import pymc3 as pm
import numpy as np
# import pydevd
# pydevd.set_pm_excepthook()
np.seterr(invalid='raise')
data = np.random.normal(size=(2, 20))
model = pm.Model()
with model:
x = pm.Normal('x', mu=.5, tau=2. ** -2, shape=(2, 1))
z = pm.Beta('z', alpha=10, beta=5.5)
d = pm.Normal('data', mu=x, tau=.75 ** -2, observed=data)
step = pm.NUTS()
def run(n=1000):
if n == "short":
n = 50
with model:
trace = pm.sample(n, step)
plt.subplot(2, 2, 1)
plt.plot(trace[x][:, 0, 0])
plt.subplot(2, 2, 2)
plt.hist(trace[x][:, 0, 0])
plt.subplot(2, 2, 3)
plt.plot(trace[x][:, 1, 0])
plt.subplot(2, 2, 4)
plt.hist(trace[x][:, 1, 0])
plt.show()
if __name__ == '__main__':
run()
|
py | 1a3bd241577e60f92dafb75552866e7b02d954bb | import numpy as np
import pandas as pd
import os
import sys
from scipy import sparse
import utils
PAPER_COUNT_FILE = sys.argv[1]
YEAR = int(sys.argv[3])
WINDOW_LENGTH = int(sys.argv[4])
OUTPUT_NODE_FILE = sys.argv[5]
OUTPUT_EDGE_FILE = sys.argv[6]
year = YEAR
if __name__ == "__main__":
# Connect to the database
graph = utils.get_db()
# Load the paper count
pcount = pd.read_csv(PAPER_COUNT_FILE, sep="\t")
# Count the number of papers for each journal
ys = YEAR - WINDOW_LENGTH
yf = YEAR
query = """
MATCH (jtrg:Journal)<-[:published_from]-(trg:Paper)<-[:cites]-(src:Paper {Year:%d})-[:published_from]->(jsrc:Journal)
where trg.Year<%d and trg.Year >= %d
return toInteger(jsrc.JournalId) as source, toInteger(jtrg.JournalId) as target, ID(trg) as p_target, ID(src) as s_target
""" % (
yf,
yf,
ys,
)
edges = graph.run(query).to_data_frame()
#print(query, edges)
# Make a node table
ccount = edges.groupby(["target"])["s_target"].nunique()
nodes = pd.DataFrame({"ccount": ccount})
nodes = nodes.reset_index().rename(columns={"target": "id"})
# Slice the paper counts between ys and yf
s = (ys <= pcount.year) & (pcount.year < yf)
_pcount = pcount[s].copy()
_pcount = _pcount.groupby("id").agg("sum")["pcount"].reset_index()
# Merge the pcount to the node table
nodes = pd.merge(left=nodes, right=_pcount, left_on="id", right_on="id", how="left")
# Uniqify and count
edges = edges.groupby(["source", "target"]).size().reset_index(name="w")
# Add citations from retracted papers
if year == 2010 or year == 2011:
if year == 2010:
added_edges = [
["medical science monitor", "cell transplantation", 445],
["the scientific world journal", "cell transplantation", 96],
["medical science monitor", "medical science monitor", 44],
["the scientific world journal", "the scientific world journal", 26],
]
elif year == 2011:
added_edges = [
["medical science monitor", "cell transplantation", 87],
["medical science monitor", "medical science monitor", 32],
["the scientific world journal", "cell transplantation", 109],
["the scientific world journal", "the scientific world journal", 29],
["cell transplantation", "technology and innovation", 24],
]
journal_list = list(
set([x[0] for x in added_edges] + [x[1] for x in added_edges])
)
query = """
MATCH (n:Journal)
WHERE n.NormalizedName in [{journals}]
return toInteger(n.JournalId) as id, n.NormalizedName as name
""".format(
journals=",".join(["'%s'" % x for x in journal_list])
)
node_table = graph.run(query).to_data_frame()
name2id = {x["name"]: x["id"] for i, x in node_table.iterrows()}
edge_list = [
{"source": name2id[x[0]], "target": name2id[x[1]], "w": x[2]}
for x in added_edges
]
added_edges = pd.DataFrame(edge_list)
edges = pd.concat([edges, added_edges], ignore_index=True)
# Save to the result
nodes.to_csv(OUTPUT_NODE_FILE, sep="\t")
edges.to_csv(OUTPUT_EDGE_FILE, sep="\t")
|
py | 1a3bd469f1aba94885bc75d4cceeaa0cbe24e883 | class Keys(Enum,IComparable,IFormattable,IConvertible):
"""
Specifies key codes and modifiers.
enum (flags) Keys,values: A (65),Add (107),Alt (262144),Apps (93),Attn (246),B (66),Back (8),BrowserBack (166),BrowserFavorites (171),BrowserForward (167),BrowserHome (172),BrowserRefresh (168),BrowserSearch (170),BrowserStop (169),C (67),Cancel (3),Capital (20),CapsLock (20),Clear (12),Control (131072),ControlKey (17),Crsel (247),D (68),D0 (48),D1 (49),D2 (50),D3 (51),D4 (52),D5 (53),D6 (54),D7 (55),D8 (56),D9 (57),Decimal (110),Delete (46),Divide (111),Down (40),E (69),End (35),Enter (13),EraseEof (249),Escape (27),Execute (43),Exsel (248),F (70),F1 (112),F10 (121),F11 (122),F12 (123),F13 (124),F14 (125),F15 (126),F16 (127),F17 (128),F18 (129),F19 (130),F2 (113),F20 (131),F21 (132),F22 (133),F23 (134),F24 (135),F3 (114),F4 (115),F5 (116),F6 (117),F7 (118),F8 (119),F9 (120),FinalMode (24),G (71),H (72),HanguelMode (21),HangulMode (21),HanjaMode (25),Help (47),Home (36),I (73),IMEAccept (30),IMEAceept (30),IMEConvert (28),IMEModeChange (31),IMENonconvert (29),Insert (45),J (74),JunjaMode (23),K (75),KanaMode (21),KanjiMode (25),KeyCode (65535),L (76),LaunchApplication1 (182),LaunchApplication2 (183),LaunchMail (180),LButton (1),LControlKey (162),Left (37),LineFeed (10),LMenu (164),LShiftKey (160),LWin (91),M (77),MButton (4),MediaNextTrack (176),MediaPlayPause (179),MediaPreviousTrack (177),MediaStop (178),Menu (18),Modifiers (-65536),Multiply (106),N (78),Next (34),NoName (252),None (0),NumLock (144),NumPad0 (96),NumPad1 (97),NumPad2 (98),NumPad3 (99),NumPad4 (100),NumPad5 (101),NumPad6 (102),NumPad7 (103),NumPad8 (104),NumPad9 (105),O (79),Oem1 (186),Oem102 (226),Oem2 (191),Oem3 (192),Oem4 (219),Oem5 (220),Oem6 (221),Oem7 (222),Oem8 (223),OemBackslash (226),OemClear (254),OemCloseBrackets (221),Oemcomma (188),OemMinus (189),OemOpenBrackets (219),OemPeriod (190),OemPipe (220),Oemplus (187),OemQuestion (191),OemQuotes (222),OemSemicolon (186),Oemtilde (192),P (80),Pa1 (253),Packet (231),PageDown (34),PageUp (33),Pause (19),Play (250),Print (42),PrintScreen (44),Prior (33),ProcessKey (229),Q (81),R (82),RButton (2),RControlKey (163),Return (13),Right (39),RMenu (165),RShiftKey (161),RWin (92),S (83),Scroll (145),Select (41),SelectMedia (181),Separator (108),Shift (65536),ShiftKey (16),Sleep (95),Snapshot (44),Space (32),Subtract (109),T (84),Tab (9),U (85),Up (38),V (86),VolumeDown (174),VolumeMute (173),VolumeUp (175),W (87),X (88),XButton1 (5),XButton2 (6),Y (89),Z (90),Zoom (251)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
A=None
Add=None
Alt=None
Apps=None
Attn=None
B=None
Back=None
BrowserBack=None
BrowserFavorites=None
BrowserForward=None
BrowserHome=None
BrowserRefresh=None
BrowserSearch=None
BrowserStop=None
C=None
Cancel=None
Capital=None
CapsLock=None
Clear=None
Control=None
ControlKey=None
Crsel=None
D=None
D0=None
D1=None
D2=None
D3=None
D4=None
D5=None
D6=None
D7=None
D8=None
D9=None
Decimal=None
Delete=None
Divide=None
Down=None
E=None
End=None
Enter=None
EraseEof=None
Escape=None
Execute=None
Exsel=None
F=None
F1=None
F10=None
F11=None
F12=None
F13=None
F14=None
F15=None
F16=None
F17=None
F18=None
F19=None
F2=None
F20=None
F21=None
F22=None
F23=None
F24=None
F3=None
F4=None
F5=None
F6=None
F7=None
F8=None
F9=None
FinalMode=None
G=None
H=None
HanguelMode=None
HangulMode=None
HanjaMode=None
Help=None
Home=None
I=None
IMEAccept=None
IMEAceept=None
IMEConvert=None
IMEModeChange=None
IMENonconvert=None
Insert=None
J=None
JunjaMode=None
K=None
KanaMode=None
KanjiMode=None
KeyCode=None
L=None
LaunchApplication1=None
LaunchApplication2=None
LaunchMail=None
LButton=None
LControlKey=None
Left=None
LineFeed=None
LMenu=None
LShiftKey=None
LWin=None
M=None
MButton=None
MediaNextTrack=None
MediaPlayPause=None
MediaPreviousTrack=None
MediaStop=None
Menu=None
Modifiers=None
Multiply=None
N=None
Next=None
NoName=None
None=None
NumLock=None
NumPad0=None
NumPad1=None
NumPad2=None
NumPad3=None
NumPad4=None
NumPad5=None
NumPad6=None
NumPad7=None
NumPad8=None
NumPad9=None
O=None
Oem1=None
Oem102=None
Oem2=None
Oem3=None
Oem4=None
Oem5=None
Oem6=None
Oem7=None
Oem8=None
OemBackslash=None
OemClear=None
OemCloseBrackets=None
Oemcomma=None
OemMinus=None
OemOpenBrackets=None
OemPeriod=None
OemPipe=None
Oemplus=None
OemQuestion=None
OemQuotes=None
OemSemicolon=None
Oemtilde=None
P=None
Pa1=None
Packet=None
PageDown=None
PageUp=None
Pause=None
Play=None
Print=None
PrintScreen=None
Prior=None
ProcessKey=None
Q=None
R=None
RButton=None
RControlKey=None
Return=None
Right=None
RMenu=None
RShiftKey=None
RWin=None
S=None
Scroll=None
Select=None
SelectMedia=None
Separator=None
Shift=None
ShiftKey=None
Sleep=None
Snapshot=None
Space=None
Subtract=None
T=None
Tab=None
U=None
Up=None
V=None
value__=None
VolumeDown=None
VolumeMute=None
VolumeUp=None
W=None
X=None
XButton1=None
XButton2=None
Y=None
Z=None
Zoom=None
|
py | 1a3bd6007f4b19b87ca55a1ae044e69b13d8db4e |
from pathlib import Path
import requests
import re
from one import params
from one.webclient import http_download_file
import SimpleITK as sitk
def download_histology_data(subject, lab):
if lab == 'hoferlab':
lab_temp = 'mrsicflogellab'
elif lab == 'churchlandlab_ucla':
lab_temp = 'churchlandlab'
else:
lab_temp = lab
par = params.get()
try:
FLAT_IRON_HIST_REL_PATH = Path('histology', lab_temp, subject,
'downsampledStacks_25', 'sample2ARA')
baseurl = (par.HTTP_DATA_SERVER + '/' + '/'.join(FLAT_IRON_HIST_REL_PATH.parts))
r = requests.get(baseurl, auth=(par.HTTP_DATA_SERVER_LOGIN, par.HTTP_DATA_SERVER_PWD))
r.raise_for_status()
except Exception as err:
print(err)
try:
subject_rem = subject.replace("_", "")
FLAT_IRON_HIST_REL_PATH = Path('histology', lab_temp, subject_rem,
'downsampledStacks_25', 'sample2ARA')
baseurl = (par.HTTP_DATA_SERVER + '/' + '/'.join(FLAT_IRON_HIST_REL_PATH.parts))
r = requests.get(baseurl, auth=(par.HTTP_DATA_SERVER_LOGIN, par.HTTP_DATA_SERVER_PWD))
r.raise_for_status()
except Exception as err:
print(err)
path_to_nrrd = None
return path_to_nrrd
tif_files = []
for line in r.text.splitlines():
result = re.findall('href="(.*).tif"', line)
if result:
tif_files.append(result[0] + '.tif')
CACHE_DIR = params.get_cache_dir().joinpath(lab, 'Subjects', subject, 'histology')
CACHE_DIR.mkdir(exist_ok=True, parents=True)
path_to_files = []
for file in tif_files:
path_to_image = Path(CACHE_DIR, file)
if not path_to_image.exists():
url = (baseurl + '/' + file)
http_download_file(url, cache_dir=CACHE_DIR,
username=par.HTTP_DATA_SERVER_LOGIN,
password=par.HTTP_DATA_SERVER_PWD)
path_to_nrrd = tif2nrrd(path_to_image)
path_to_files.append(path_to_nrrd)
if len(path_to_files) > 3:
path_to_files = path_to_files[1:3]
return path_to_files
def tif2nrrd(path_to_image):
path_to_nrrd = Path(path_to_image.parent, path_to_image.parts[-1][:-3] + 'nrrd')
if not path_to_nrrd.exists():
reader = sitk.ImageFileReader()
reader.SetImageIO("TIFFImageIO")
reader.SetFileName(str(path_to_image))
img = reader.Execute()
new_img = sitk.PermuteAxes(img, [2, 1, 0])
new_img = sitk.Flip(new_img, [True, False, False])
new_img.SetSpacing([1, 1, 1])
writer = sitk.ImageFileWriter()
writer.SetImageIO("NrrdImageIO")
writer.SetFileName(str(path_to_nrrd))
writer.Execute(new_img)
return path_to_nrrd
|
py | 1a3bd60c7bb40854b7505790f853ddc5836b7fbc | """
sphinx.domains.python
~~~~~~~~~~~~~~~~~~~~~
The Python domain.
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import builtins
import inspect
import re
import sys
import typing
import warnings
from inspect import Parameter
from typing import Any, Dict, Iterable, Iterator, List, NamedTuple, Optional, Tuple, Type, cast
from docutils import nodes
from docutils.nodes import Element, Node
from docutils.parsers.rst import directives
from docutils.parsers.rst.states import Inliner
from sphinx import addnodes
from sphinx.addnodes import desc_signature, pending_xref, pending_xref_condition
from sphinx.application import Sphinx
from sphinx.builders import Builder
from sphinx.deprecation import RemovedInSphinx50Warning
from sphinx.directives import ObjectDescription
from sphinx.domains import Domain, Index, IndexEntry, ObjType
from sphinx.environment import BuildEnvironment
from sphinx.locale import _, __
from sphinx.pycode.ast import ast
from sphinx.pycode.ast import parse as ast_parse
from sphinx.roles import XRefRole
from sphinx.util import logging
from sphinx.util.docfields import Field, GroupedField, TypedField
from sphinx.util.docutils import SphinxDirective
from sphinx.util.inspect import signature_from_str
from sphinx.util.nodes import find_pending_xref_condition, make_id, make_refnode
from sphinx.util.typing import OptionSpec, TextlikeNode
logger = logging.getLogger(__name__)
# REs for Python signatures
py_sig_re = re.compile(
r'''^ ([\w.]*\.)? # class name(s)
(\w+) \s* # thing name
(?: \(\s*(.*)\s*\) # optional: arguments
(?:\s* -> \s* (.*))? # return annotation
)? $ # and nothing more
''', re.VERBOSE)
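# Hedged illustration (added comment, not in the original module): py_sig_re splits a
# declaration such as "module.Class.method(arg1, arg2=None) -> bool" into four groups:
#   ('module.Class.', 'method', 'arg1, arg2=None', 'bool')
# i.e. the optional dotted prefix, the object name, the optional argument list and the
# optional return annotation.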
pairindextypes = {
'module': _('module'),
'keyword': _('keyword'),
'operator': _('operator'),
'object': _('object'),
'exception': _('exception'),
'statement': _('statement'),
'builtin': _('built-in function'),
}
class ObjectEntry(NamedTuple):
docname: str
node_id: str
objtype: str
aliased: bool
class ModuleEntry(NamedTuple):
docname: str
node_id: str
synopsis: str
platform: str
deprecated: bool
def type_to_xref(text: str, env: BuildEnvironment = None) -> addnodes.pending_xref:
"""Convert a type string to a cross reference node."""
if text == 'None':
reftype = 'obj'
else:
reftype = 'class'
if env:
kwargs = {'py:module': env.ref_context.get('py:module'),
'py:class': env.ref_context.get('py:class')}
else:
kwargs = {}
if env.config.python_use_unqualified_type_names:
# Note: It would be better to use qualname to describe the object to support
# nested classes. But python domain can't access the real python object because this
# module should work not-dynamically.
shortname = text.split('.')[-1]
contnodes: List[Node] = [pending_xref_condition('', shortname, condition='resolved'),
pending_xref_condition('', text, condition='*')]
else:
contnodes = [nodes.Text(text)]
return pending_xref('', *contnodes,
refdomain='py', reftype=reftype, reftarget=text, **kwargs)
def _parse_annotation(annotation: str, env: BuildEnvironment = None) -> List[Node]:
"""Parse type annotation."""
def unparse(node: ast.AST) -> List[Node]:
if isinstance(node, ast.Attribute):
return [nodes.Text("%s.%s" % (unparse(node.value)[0], node.attr))]
elif isinstance(node, ast.BinOp):
result: List[Node] = unparse(node.left)
result.extend(unparse(node.op))
result.extend(unparse(node.right))
return result
elif isinstance(node, ast.BitOr):
return [addnodes.desc_sig_space(),
addnodes.desc_sig_punctuation('', '|'),
addnodes.desc_sig_space()]
elif isinstance(node, ast.Constant): # type: ignore
if node.value is Ellipsis:
return [addnodes.desc_sig_punctuation('', "...")]
elif isinstance(node.value, bool):
return [addnodes.desc_sig_keyword('', repr(node.value))]
elif isinstance(node.value, int):
return [addnodes.desc_sig_literal_number('', repr(node.value))]
elif isinstance(node.value, str):
return [addnodes.desc_sig_literal_string('', repr(node.value))]
else:
# handles None, which is further handled by type_to_xref later
# and fallback for other types that should be converted
return [nodes.Text(repr(node.value))]
elif isinstance(node, ast.Expr):
return unparse(node.value)
elif isinstance(node, ast.Index):
return unparse(node.value)
elif isinstance(node, ast.List):
result = [addnodes.desc_sig_punctuation('', '[')]
if node.elts:
# check if there are elements in node.elts to only pop the
# last element of result if the for-loop was run at least
# once
for elem in node.elts:
result.extend(unparse(elem))
result.append(addnodes.desc_sig_punctuation('', ','))
result.append(addnodes.desc_sig_space())
result.pop()
result.pop()
result.append(addnodes.desc_sig_punctuation('', ']'))
return result
elif isinstance(node, ast.Module):
return sum((unparse(e) for e in node.body), [])
elif isinstance(node, ast.Name):
return [nodes.Text(node.id)]
elif isinstance(node, ast.Subscript):
result = unparse(node.value)
result.append(addnodes.desc_sig_punctuation('', '['))
result.extend(unparse(node.slice))
result.append(addnodes.desc_sig_punctuation('', ']'))
# Wrap the Text nodes inside brackets by literal node if the subscript is a Literal
if result[0] in ('Literal', 'typing.Literal'):
for i, subnode in enumerate(result[1:], start=1):
if isinstance(subnode, nodes.Text):
result[i] = nodes.literal('', '', subnode)
return result
elif isinstance(node, ast.Tuple):
if node.elts:
result = []
for elem in node.elts:
result.extend(unparse(elem))
result.append(addnodes.desc_sig_punctuation('', ','))
result.append(addnodes.desc_sig_space())
result.pop()
result.pop()
else:
result = [addnodes.desc_sig_punctuation('', '('),
addnodes.desc_sig_punctuation('', ')')]
return result
else:
if sys.version_info < (3, 8):
if isinstance(node, ast.Ellipsis):
return [addnodes.desc_sig_punctuation('', "...")]
elif isinstance(node, ast.NameConstant):
return [nodes.Text(node.value)]
raise SyntaxError # unsupported syntax
if env is None:
warnings.warn("The env parameter for _parse_annotation becomes required now.",
RemovedInSphinx50Warning, stacklevel=2)
try:
tree = ast_parse(annotation)
result = unparse(tree)
for i, node in enumerate(result):
if isinstance(node, nodes.literal):
result[i] = node[0]
elif isinstance(node, nodes.Text) and node.strip():
result[i] = type_to_xref(str(node), env)
return result
except SyntaxError:
return [type_to_xref(annotation, env)]
def _parse_arglist(arglist: str, env: BuildEnvironment = None) -> addnodes.desc_parameterlist:
"""Parse a list of arguments using AST parser"""
params = addnodes.desc_parameterlist(arglist)
sig = signature_from_str('(%s)' % arglist)
last_kind = None
for param in sig.parameters.values():
if param.kind != param.POSITIONAL_ONLY and last_kind == param.POSITIONAL_ONLY:
# PEP-570: Separator for Positional Only Parameter: /
params += addnodes.desc_parameter('', '', addnodes.desc_sig_operator('', '/'))
if param.kind == param.KEYWORD_ONLY and last_kind in (param.POSITIONAL_OR_KEYWORD,
param.POSITIONAL_ONLY,
None):
# PEP-3102: Separator for Keyword Only Parameter: *
params += addnodes.desc_parameter('', '', addnodes.desc_sig_operator('', '*'))
node = addnodes.desc_parameter()
if param.kind == param.VAR_POSITIONAL:
node += addnodes.desc_sig_operator('', '*')
node += addnodes.desc_sig_name('', param.name)
elif param.kind == param.VAR_KEYWORD:
node += addnodes.desc_sig_operator('', '**')
node += addnodes.desc_sig_name('', param.name)
else:
node += addnodes.desc_sig_name('', param.name)
if param.annotation is not param.empty:
children = _parse_annotation(param.annotation, env)
node += addnodes.desc_sig_punctuation('', ':')
node += addnodes.desc_sig_space()
node += addnodes.desc_sig_name('', '', *children) # type: ignore
if param.default is not param.empty:
if param.annotation is not param.empty:
node += addnodes.desc_sig_space()
node += addnodes.desc_sig_operator('', '=')
node += addnodes.desc_sig_space()
else:
node += addnodes.desc_sig_operator('', '=')
node += nodes.inline('', param.default, classes=['default_value'],
support_smartquotes=False)
params += node
last_kind = param.kind
if last_kind == Parameter.POSITIONAL_ONLY:
# PEP-570: Separator for Positional Only Parameter: /
params += addnodes.desc_parameter('', '', addnodes.desc_sig_operator('', '/'))
return params
def _pseudo_parse_arglist(signode: desc_signature, arglist: str) -> None:
""""Parse" a list of arguments separated by commas.
Arguments can have "optional" annotations given by enclosing them in
brackets. Currently, this will split at any comma, even if it's inside a
string literal (e.g. default argument value).
"""
paramlist = addnodes.desc_parameterlist()
stack: List[Element] = [paramlist]
try:
for argument in arglist.split(','):
argument = argument.strip()
ends_open = ends_close = 0
while argument.startswith('['):
stack.append(addnodes.desc_optional())
stack[-2] += stack[-1]
argument = argument[1:].strip()
while argument.startswith(']'):
stack.pop()
argument = argument[1:].strip()
while argument.endswith(']') and not argument.endswith('[]'):
ends_close += 1
argument = argument[:-1].strip()
while argument.endswith('['):
ends_open += 1
argument = argument[:-1].strip()
if argument:
stack[-1] += addnodes.desc_parameter(
'', '', addnodes.desc_sig_name(argument, argument))
while ends_open:
stack.append(addnodes.desc_optional())
stack[-2] += stack[-1]
ends_open -= 1
while ends_close:
stack.pop()
ends_close -= 1
if len(stack) != 1:
raise IndexError
except IndexError:
# if there are too few or too many elements on the stack, just give up
# and treat the whole argument list as one argument, discarding the
# already partially populated paramlist node
paramlist = addnodes.desc_parameterlist()
paramlist += addnodes.desc_parameter(arglist, arglist)
signode += paramlist
else:
signode += paramlist
# This override allows our inline type specifiers to behave like :class: link
# when it comes to handling "." and "~" prefixes.
class PyXrefMixin:
def make_xref(self, rolename: str, domain: str, target: str,
innernode: Type[TextlikeNode] = nodes.emphasis,
contnode: Node = None, env: BuildEnvironment = None,
inliner: Inliner = None, location: Node = None) -> Node:
# we use inliner=None to make sure we get the old behaviour with a single
# pending_xref node
result = super().make_xref(rolename, domain, target, # type: ignore
innernode, contnode,
env, inliner=None, location=None)
result['refspecific'] = True
result['py:module'] = env.ref_context.get('py:module')
result['py:class'] = env.ref_context.get('py:class')
if target.startswith(('.', '~')):
prefix, result['reftarget'] = target[0], target[1:]
if prefix == '.':
text = target[1:]
elif prefix == '~':
text = target.split('.')[-1]
for node in result.traverse(nodes.Text):
node.parent[node.parent.index(node)] = nodes.Text(text)
break
elif isinstance(result, pending_xref) and env.config.python_use_unqualified_type_names:
children = result.children
result.clear()
shortname = target.split('.')[-1]
textnode = innernode('', shortname)
contnodes = [pending_xref_condition('', '', textnode, condition='resolved'),
pending_xref_condition('', '', *children, condition='*')]
result.extend(contnodes)
return result
def make_xrefs(self, rolename: str, domain: str, target: str,
innernode: Type[TextlikeNode] = nodes.emphasis,
contnode: Node = None, env: BuildEnvironment = None,
inliner: Inliner = None, location: Node = None) -> List[Node]:
delims = r'(\s*[\[\]\(\),](?:\s*or\s)?\s*|\s+or\s+|\s*\|\s*|\.\.\.)'
delims_re = re.compile(delims)
sub_targets = re.split(delims, target)
split_contnode = bool(contnode and contnode.astext() == target)
results = []
for sub_target in filter(None, sub_targets):
if split_contnode:
contnode = nodes.Text(sub_target)
if delims_re.match(sub_target):
results.append(contnode or innernode(sub_target, sub_target))
else:
results.append(self.make_xref(rolename, domain, sub_target,
innernode, contnode, env, inliner, location))
return results
class PyField(PyXrefMixin, Field):
def make_xref(self, rolename: str, domain: str, target: str,
innernode: Type[TextlikeNode] = nodes.emphasis,
contnode: Node = None, env: BuildEnvironment = None,
inliner: Inliner = None, location: Node = None) -> Node:
if rolename == 'class' and target == 'None':
# None is not a type, so use obj role instead.
rolename = 'obj'
return super().make_xref(rolename, domain, target, innernode, contnode,
env, inliner, location)
class PyGroupedField(PyXrefMixin, GroupedField):
pass
class PyTypedField(PyXrefMixin, TypedField):
def make_xref(self, rolename: str, domain: str, target: str,
innernode: Type[TextlikeNode] = nodes.emphasis,
contnode: Node = None, env: BuildEnvironment = None,
inliner: Inliner = None, location: Node = None) -> Node:
if rolename == 'class' and target == 'None':
# None is not a type, so use obj role instead.
rolename = 'obj'
return super().make_xref(rolename, domain, target, innernode, contnode,
env, inliner, location)
class PyObject(ObjectDescription[Tuple[str, str]]):
"""
Description of a general Python object.
:cvar allow_nesting: Class is an object that allows for nested namespaces
:vartype allow_nesting: bool
"""
option_spec: OptionSpec = {
'noindex': directives.flag,
'noindexentry': directives.flag,
'module': directives.unchanged,
'canonical': directives.unchanged,
'annotation': directives.unchanged,
}
doc_field_types = [
PyTypedField('parameter', label=_('Parameters'),
names=('param', 'parameter', 'arg', 'argument',
'keyword', 'kwarg', 'kwparam'),
typerolename='class', typenames=('paramtype', 'type'),
can_collapse=True),
PyTypedField('variable', label=_('Variables'),
names=('var', 'ivar', 'cvar'),
typerolename='class', typenames=('vartype',),
can_collapse=True),
PyGroupedField('exceptions', label=_('Raises'), rolename='exc',
names=('raises', 'raise', 'exception', 'except'),
can_collapse=True),
Field('returnvalue', label=_('Returns'), has_arg=False,
names=('returns', 'return')),
PyField('returntype', label=_('Return type'), has_arg=False,
names=('rtype',), bodyrolename='class'),
]
allow_nesting = False
def get_signature_prefix(self, sig: str) -> List[nodes.Node]:
"""May return a prefix to put before the object name in the
signature.
"""
return []
def needs_arglist(self) -> bool:
"""May return true if an empty argument list is to be generated even if
the document contains none.
"""
return False
def handle_signature(self, sig: str, signode: desc_signature) -> Tuple[str, str]:
"""Transform a Python signature into RST nodes.
Return (fully qualified name of the thing, classname if any).
If inside a class, the current class name is handled intelligently:
* it is stripped from the displayed name if present
* it is added to the full name (return value) if not present
"""
m = py_sig_re.match(sig)
if m is None:
raise ValueError
prefix, name, arglist, retann = m.groups()
# determine module and class name (if applicable), as well as full name
modname = self.options.get('module', self.env.ref_context.get('py:module'))
classname = self.env.ref_context.get('py:class')
if classname:
add_module = False
if prefix and (prefix == classname or
prefix.startswith(classname + ".")):
fullname = prefix + name
# class name is given again in the signature
prefix = prefix[len(classname):].lstrip('.')
elif prefix:
# class name is given in the signature, but different
# (shouldn't happen)
fullname = classname + '.' + prefix + name
else:
# class name is not given in the signature
fullname = classname + '.' + name
else:
add_module = True
if prefix:
classname = prefix.rstrip('.')
fullname = prefix + name
else:
classname = ''
fullname = name
signode['module'] = modname
signode['class'] = classname
signode['fullname'] = fullname
sig_prefix = self.get_signature_prefix(sig)
if sig_prefix:
signode += addnodes.desc_annotation(str(sig_prefix), '', *sig_prefix)
if prefix:
signode += addnodes.desc_addname(prefix, prefix)
elif modname and add_module and self.env.config.add_module_names:
nodetext = modname + '.'
signode += addnodes.desc_addname(nodetext, nodetext)
signode += addnodes.desc_name(name, name)
if arglist:
try:
signode += _parse_arglist(arglist, self.env)
except SyntaxError:
# fall back to the original arglist parser.
# it supports representing optional arguments (ex. "func(foo [, bar])")
_pseudo_parse_arglist(signode, arglist)
except NotImplementedError as exc:
logger.warning("could not parse arglist (%r): %s", arglist, exc,
location=signode)
_pseudo_parse_arglist(signode, arglist)
else:
if self.needs_arglist():
# for callables, add an empty parameter list
signode += addnodes.desc_parameterlist()
if retann:
children = _parse_annotation(retann, self.env)
signode += addnodes.desc_returns(retann, '', *children)
anno = self.options.get('annotation')
if anno:
signode += addnodes.desc_annotation(' ' + anno, '',
addnodes.desc_sig_space(),
nodes.Text(anno))
return fullname, prefix
def get_index_text(self, modname: str, name: Tuple[str, str]) -> str:
"""Return the text for the index entry of the object."""
raise NotImplementedError('must be implemented in subclasses')
def add_target_and_index(self, name_cls: Tuple[str, str], sig: str,
signode: desc_signature) -> None:
modname = self.options.get('module', self.env.ref_context.get('py:module'))
fullname = (modname + '.' if modname else '') + name_cls[0]
node_id = make_id(self.env, self.state.document, '', fullname)
signode['ids'].append(node_id)
# Assign old styled node_id(fullname) so as not to break old hyperlinks (if possible)
# Note: Will be removed in Sphinx-5.0 (RemovedInSphinx50Warning)
if node_id != fullname and fullname not in self.state.document.ids:
signode['ids'].append(fullname)
self.state.document.note_explicit_target(signode)
domain = cast(PythonDomain, self.env.get_domain('py'))
domain.note_object(fullname, self.objtype, node_id, location=signode)
canonical_name = self.options.get('canonical')
if canonical_name:
domain.note_object(canonical_name, self.objtype, node_id, aliased=True,
location=signode)
if 'noindexentry' not in self.options:
indextext = self.get_index_text(modname, name_cls)
if indextext:
self.indexnode['entries'].append(('single', indextext, node_id, '', None))
def before_content(self) -> None:
"""Handle object nesting before content
:py:class:`PyObject` represents Python language constructs. For
constructs that are nestable, such as Python classes, this method will
build up a stack of the nesting hierarchy so that it can be later
de-nested correctly, in :py:meth:`after_content`.
For constructs that aren't nestable, the stack is bypassed, and instead
only the most recent object is tracked. This object prefix name will be
removed with :py:meth:`after_content`.
"""
prefix = None
if self.names:
# fullname and name_prefix come from the `handle_signature` method.
# fullname represents the full object name that is constructed using
# object nesting and explicit prefixes. `name_prefix` is the
# explicit prefix given in a signature
(fullname, name_prefix) = self.names[-1]
if self.allow_nesting:
prefix = fullname
elif name_prefix:
prefix = name_prefix.strip('.')
if prefix:
self.env.ref_context['py:class'] = prefix
if self.allow_nesting:
classes = self.env.ref_context.setdefault('py:classes', [])
classes.append(prefix)
if 'module' in self.options:
modules = self.env.ref_context.setdefault('py:modules', [])
modules.append(self.env.ref_context.get('py:module'))
self.env.ref_context['py:module'] = self.options['module']
def after_content(self) -> None:
"""Handle object de-nesting after content
If this class is a nestable object, removing the last nested class prefix
ends further nesting in the object.
If this class is not a nestable object, the list of classes should not
be altered as we didn't affect the nesting levels in
:py:meth:`before_content`.
"""
classes = self.env.ref_context.setdefault('py:classes', [])
if self.allow_nesting:
try:
classes.pop()
except IndexError:
pass
self.env.ref_context['py:class'] = (classes[-1] if len(classes) > 0
else None)
if 'module' in self.options:
modules = self.env.ref_context.setdefault('py:modules', [])
if modules:
self.env.ref_context['py:module'] = modules.pop()
else:
self.env.ref_context.pop('py:module')
class PyFunction(PyObject):
"""Description of a function."""
option_spec: OptionSpec = PyObject.option_spec.copy()
option_spec.update({
'async': directives.flag,
})
def get_signature_prefix(self, sig: str) -> List[nodes.Node]:
if 'async' in self.options:
return [addnodes.desc_sig_keyword('', 'async'),
addnodes.desc_sig_space()]
else:
return []
def needs_arglist(self) -> bool:
return True
def add_target_and_index(self, name_cls: Tuple[str, str], sig: str,
signode: desc_signature) -> None:
super().add_target_and_index(name_cls, sig, signode)
if 'noindexentry' not in self.options:
modname = self.options.get('module', self.env.ref_context.get('py:module'))
node_id = signode['ids'][0]
name, cls = name_cls
if modname:
text = _('%s() (in module %s)') % (name, modname)
self.indexnode['entries'].append(('single', text, node_id, '', None))
else:
text = '%s; %s()' % (pairindextypes['builtin'], name)
self.indexnode['entries'].append(('pair', text, node_id, '', None))
def get_index_text(self, modname: str, name_cls: Tuple[str, str]) -> str:
# add index in own add_target_and_index() instead.
return None
class PyDecoratorFunction(PyFunction):
"""Description of a decorator."""
def run(self) -> List[Node]:
# a decorator function is a function after all
self.name = 'py:function'
return super().run()
def handle_signature(self, sig: str, signode: desc_signature) -> Tuple[str, str]:
ret = super().handle_signature(sig, signode)
signode.insert(0, addnodes.desc_addname('@', '@'))
return ret
def needs_arglist(self) -> bool:
return False
class PyVariable(PyObject):
"""Description of a variable."""
option_spec: OptionSpec = PyObject.option_spec.copy()
option_spec.update({
'type': directives.unchanged,
'value': directives.unchanged,
})
def handle_signature(self, sig: str, signode: desc_signature) -> Tuple[str, str]:
fullname, prefix = super().handle_signature(sig, signode)
typ = self.options.get('type')
if typ:
annotations = _parse_annotation(typ, self.env)
signode += addnodes.desc_annotation(typ, '',
addnodes.desc_sig_punctuation('', ':'),
addnodes.desc_sig_space(), *annotations)
value = self.options.get('value')
if value:
signode += addnodes.desc_annotation(value, '',
addnodes.desc_sig_space(),
addnodes.desc_sig_punctuation('', '='),
addnodes.desc_sig_space(),
nodes.Text(value))
return fullname, prefix
def get_index_text(self, modname: str, name_cls: Tuple[str, str]) -> str:
name, cls = name_cls
if modname:
return _('%s (in module %s)') % (name, modname)
else:
return _('%s (built-in variable)') % name
class PyClasslike(PyObject):
"""
Description of a class-like object (classes, interfaces, exceptions).
"""
option_spec: OptionSpec = PyObject.option_spec.copy()
option_spec.update({
'final': directives.flag,
})
allow_nesting = True
def get_signature_prefix(self, sig: str) -> List[nodes.Node]:
if 'final' in self.options:
return [nodes.Text('final'), addnodes.desc_sig_space(),
nodes.Text(self.objtype), addnodes.desc_sig_space()]
else:
return [nodes.Text(self.objtype), addnodes.desc_sig_space()]
def get_index_text(self, modname: str, name_cls: Tuple[str, str]) -> str:
if self.objtype == 'class':
if not modname:
return _('%s (built-in class)') % name_cls[0]
return _('%s (class in %s)') % (name_cls[0], modname)
elif self.objtype == 'exception':
return name_cls[0]
else:
return ''
class PyMethod(PyObject):
"""Description of a method."""
option_spec: OptionSpec = PyObject.option_spec.copy()
option_spec.update({
'abstractmethod': directives.flag,
'async': directives.flag,
'classmethod': directives.flag,
'final': directives.flag,
'property': directives.flag,
'staticmethod': directives.flag,
})
def needs_arglist(self) -> bool:
if 'property' in self.options:
return False
else:
return True
def get_signature_prefix(self, sig: str) -> List[nodes.Node]:
prefix: List[nodes.Node] = []
if 'final' in self.options:
prefix.append(nodes.Text('final'))
prefix.append(addnodes.desc_sig_space())
if 'abstractmethod' in self.options:
prefix.append(nodes.Text('abstract'))
prefix.append(addnodes.desc_sig_space())
if 'async' in self.options:
prefix.append(nodes.Text('async'))
prefix.append(addnodes.desc_sig_space())
if 'classmethod' in self.options:
prefix.append(nodes.Text('classmethod'))
prefix.append(addnodes.desc_sig_space())
if 'property' in self.options:
prefix.append(nodes.Text('property'))
prefix.append(addnodes.desc_sig_space())
if 'staticmethod' in self.options:
prefix.append(nodes.Text('static'))
prefix.append(addnodes.desc_sig_space())
return prefix
def get_index_text(self, modname: str, name_cls: Tuple[str, str]) -> str:
name, cls = name_cls
try:
clsname, methname = name.rsplit('.', 1)
if modname and self.env.config.add_module_names:
clsname = '.'.join([modname, clsname])
except ValueError:
if modname:
return _('%s() (in module %s)') % (name, modname)
else:
return '%s()' % name
if 'classmethod' in self.options:
return _('%s() (%s class method)') % (methname, clsname)
elif 'property' in self.options:
return _('%s (%s property)') % (methname, clsname)
elif 'staticmethod' in self.options:
return _('%s() (%s static method)') % (methname, clsname)
else:
return _('%s() (%s method)') % (methname, clsname)
class PyClassMethod(PyMethod):
"""Description of a classmethod."""
option_spec: OptionSpec = PyObject.option_spec.copy()
def run(self) -> List[Node]:
self.name = 'py:method'
self.options['classmethod'] = True
return super().run()
class PyStaticMethod(PyMethod):
"""Description of a staticmethod."""
option_spec: OptionSpec = PyObject.option_spec.copy()
def run(self) -> List[Node]:
self.name = 'py:method'
self.options['staticmethod'] = True
return super().run()
class PyDecoratorMethod(PyMethod):
"""Description of a decoratormethod."""
def run(self) -> List[Node]:
self.name = 'py:method'
return super().run()
def handle_signature(self, sig: str, signode: desc_signature) -> Tuple[str, str]:
ret = super().handle_signature(sig, signode)
signode.insert(0, addnodes.desc_addname('@', '@'))
return ret
def needs_arglist(self) -> bool:
return False
class PyAttribute(PyObject):
"""Description of an attribute."""
option_spec: OptionSpec = PyObject.option_spec.copy()
option_spec.update({
'type': directives.unchanged,
'value': directives.unchanged,
})
def handle_signature(self, sig: str, signode: desc_signature) -> Tuple[str, str]:
fullname, prefix = super().handle_signature(sig, signode)
typ = self.options.get('type')
if typ:
annotations = _parse_annotation(typ, self.env)
signode += addnodes.desc_annotation(typ, '',
addnodes.desc_sig_punctuation('', ':'),
addnodes.desc_sig_space(),
*annotations)
value = self.options.get('value')
if value:
signode += addnodes.desc_annotation(value, '',
addnodes.desc_sig_space(),
addnodes.desc_sig_punctuation('', '='),
addnodes.desc_sig_space(),
nodes.Text(value))
return fullname, prefix
def get_index_text(self, modname: str, name_cls: Tuple[str, str]) -> str:
name, cls = name_cls
try:
clsname, attrname = name.rsplit('.', 1)
if modname and self.env.config.add_module_names:
clsname = '.'.join([modname, clsname])
except ValueError:
if modname:
return _('%s (in module %s)') % (name, modname)
else:
return name
return _('%s (%s attribute)') % (attrname, clsname)
class PyProperty(PyObject):
"""Description of an attribute."""
option_spec = PyObject.option_spec.copy()
option_spec.update({
'abstractmethod': directives.flag,
'classmethod': directives.flag,
'type': directives.unchanged,
})
def handle_signature(self, sig: str, signode: desc_signature) -> Tuple[str, str]:
fullname, prefix = super().handle_signature(sig, signode)
typ = self.options.get('type')
if typ:
annotations = _parse_annotation(typ, self.env)
signode += addnodes.desc_annotation(typ, '',
addnodes.desc_sig_punctuation('', ':'),
addnodes.desc_sig_space(),
*annotations)
return fullname, prefix
def get_signature_prefix(self, sig: str) -> List[nodes.Node]:
prefix: List[nodes.Node] = []
if 'abstractmethod' in self.options:
prefix.append(nodes.Text('abstract'))
prefix.append(addnodes.desc_sig_space())
if 'classmethod' in self.options:
prefix.append(nodes.Text('class'))
prefix.append(addnodes.desc_sig_space())
prefix.append(nodes.Text('property'))
prefix.append(addnodes.desc_sig_space())
return prefix
def get_index_text(self, modname: str, name_cls: Tuple[str, str]) -> str:
name, cls = name_cls
try:
clsname, attrname = name.rsplit('.', 1)
if modname and self.env.config.add_module_names:
clsname = '.'.join([modname, clsname])
except ValueError:
if modname:
return _('%s (in module %s)') % (name, modname)
else:
return name
return _('%s (%s property)') % (attrname, clsname)
class PyDecoratorMixin:
"""
Mixin for decorator directives.
"""
def handle_signature(self, sig: str, signode: desc_signature) -> Tuple[str, str]:
for cls in self.__class__.__mro__:
if cls.__name__ != 'DirectiveAdapter':
warnings.warn('PyDecoratorMixin is deprecated. '
'Please check the implementation of %s' % cls,
RemovedInSphinx50Warning, stacklevel=2)
break
else:
warnings.warn('PyDecoratorMixin is deprecated',
RemovedInSphinx50Warning, stacklevel=2)
ret = super().handle_signature(sig, signode) # type: ignore
signode.insert(0, addnodes.desc_addname('@', '@'))
return ret
def needs_arglist(self) -> bool:
return False
class PyModule(SphinxDirective):
"""
Directive to mark description of a new module.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
option_spec: OptionSpec = {
'platform': lambda x: x,
'synopsis': lambda x: x,
'noindex': directives.flag,
'deprecated': directives.flag,
}
def run(self) -> List[Node]:
domain = cast(PythonDomain, self.env.get_domain('py'))
modname = self.arguments[0].strip()
noindex = 'noindex' in self.options
self.env.ref_context['py:module'] = modname
ret: List[Node] = []
if not noindex:
# note module to the domain
node_id = make_id(self.env, self.state.document, 'module', modname)
target = nodes.target('', '', ids=[node_id], ismod=True)
self.set_source_info(target)
# Assign old styled node_id so as not to break old hyperlinks (if possible)
# Note: Will be removed in Sphinx-5.0 (RemovedInSphinx50Warning)
old_node_id = self.make_old_id(modname)
if node_id != old_node_id and old_node_id not in self.state.document.ids:
target['ids'].append(old_node_id)
self.state.document.note_explicit_target(target)
domain.note_module(modname,
node_id,
self.options.get('synopsis', ''),
self.options.get('platform', ''),
'deprecated' in self.options)
domain.note_object(modname, 'module', node_id, location=target)
# the platform and synopsis aren't printed; in fact, they are only
# used in the modindex currently
ret.append(target)
indextext = '%s; %s' % (pairindextypes['module'], modname)
inode = addnodes.index(entries=[('pair', indextext, node_id, '', None)])
ret.append(inode)
return ret
def make_old_id(self, name: str) -> str:
"""Generate old styled node_id.
Old styled node_id is incompatible with docutils' node_id.
It can contain dots and hyphens.
.. note:: Old styled node_id was mainly used until Sphinx-3.0.
"""
return 'module-%s' % name
class PyCurrentModule(SphinxDirective):
"""
This directive is just to tell Sphinx that we're documenting
stuff in module foo, but links to module foo won't lead here.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
option_spec: OptionSpec = {}
def run(self) -> List[Node]:
modname = self.arguments[0].strip()
if modname == 'None':
self.env.ref_context.pop('py:module', None)
else:
self.env.ref_context['py:module'] = modname
return []
class PyXRefRole(XRefRole):
def process_link(self, env: BuildEnvironment, refnode: Element,
has_explicit_title: bool, title: str, target: str) -> Tuple[str, str]:
refnode['py:module'] = env.ref_context.get('py:module')
refnode['py:class'] = env.ref_context.get('py:class')
if not has_explicit_title:
title = title.lstrip('.') # only has a meaning for the target
target = target.lstrip('~') # only has a meaning for the title
# if the first character is a tilde, don't display the module/class
# parts of the contents
if title[0:1] == '~':
title = title[1:]
dot = title.rfind('.')
if dot != -1:
title = title[dot + 1:]
# if the first character is a dot, search more specific namespaces first
# else search builtins first
if target[0:1] == '.':
target = target[1:]
refnode['refspecific'] = True
return title, target
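# Illustrative examples of the rewriting above (hypothetical targets): a reference
# written as "~queue.Queue.get" is displayed as "get" while still targeting
# "queue.Queue.get", and a target written as ".get" drops the leading dot and sets
# 'refspecific' so that more specific namespaces are searched first.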
def filter_meta_fields(app: Sphinx, domain: str, objtype: str, content: Element) -> None:
"""Filter ``:meta:`` field from its docstring."""
if domain != 'py':
return
for node in content:
if isinstance(node, nodes.field_list):
fields = cast(List[nodes.field], node)
for field in fields:
field_name = cast(nodes.field_body, field[0]).astext().strip()
if field_name == 'meta' or field_name.startswith('meta '):
node.remove(field)
break
class PythonModuleIndex(Index):
"""
Index subclass to provide the Python module index.
"""
name = 'modindex'
localname = _('Python Module Index')
shortname = _('modules')
def generate(self, docnames: Iterable[str] = None
) -> Tuple[List[Tuple[str, List[IndexEntry]]], bool]:
content: Dict[str, List[IndexEntry]] = {}
# list of prefixes to ignore
ignores: List[str] = self.domain.env.config['modindex_common_prefix']
ignores = sorted(ignores, key=len, reverse=True)
# list of all modules, sorted by module name
modules = sorted(self.domain.data['modules'].items(),
key=lambda x: x[0].lower())
# sort out collapsible modules
prev_modname = ''
num_toplevels = 0
for modname, (docname, node_id, synopsis, platforms, deprecated) in modules:
if docnames and docname not in docnames:
continue
for ignore in ignores:
if modname.startswith(ignore):
modname = modname[len(ignore):]
stripped = ignore
break
else:
stripped = ''
# we stripped the whole module name?
if not modname:
modname, stripped = stripped, ''
entries = content.setdefault(modname[0].lower(), [])
package = modname.split('.')[0]
if package != modname:
# it's a submodule
if prev_modname == package:
# first submodule - make parent a group head
if entries:
last = entries[-1]
entries[-1] = IndexEntry(last[0], 1, last[2], last[3],
last[4], last[5], last[6])
elif not prev_modname.startswith(package):
# submodule without parent in list, add dummy entry
entries.append(IndexEntry(stripped + package, 1, '', '', '', '', ''))
subtype = 2
else:
num_toplevels += 1
subtype = 0
qualifier = _('Deprecated') if deprecated else ''
entries.append(IndexEntry(stripped + modname, subtype, docname,
node_id, platforms, qualifier, synopsis))
prev_modname = modname
# apply heuristics when to collapse modindex at page load:
# only collapse if number of toplevel modules is larger than
# number of submodules
collapse = len(modules) - num_toplevels < num_toplevels
# sort by first letter
sorted_content = sorted(content.items())
return sorted_content, collapse
class PythonDomain(Domain):
"""Python language domain."""
name = 'py'
label = 'Python'
object_types: Dict[str, ObjType] = {
'function': ObjType(_('function'), 'func', 'obj'),
'data': ObjType(_('data'), 'data', 'obj'),
'class': ObjType(_('class'), 'class', 'exc', 'obj'),
'exception': ObjType(_('exception'), 'exc', 'class', 'obj'),
'method': ObjType(_('method'), 'meth', 'obj'),
'classmethod': ObjType(_('class method'), 'meth', 'obj'),
'staticmethod': ObjType(_('static method'), 'meth', 'obj'),
'attribute': ObjType(_('attribute'), 'attr', 'obj'),
'property': ObjType(_('property'), 'attr', '_prop', 'obj'),
'module': ObjType(_('module'), 'mod', 'obj'),
}
directives = {
'function': PyFunction,
'data': PyVariable,
'class': PyClasslike,
'exception': PyClasslike,
'method': PyMethod,
'classmethod': PyClassMethod,
'staticmethod': PyStaticMethod,
'attribute': PyAttribute,
'property': PyProperty,
'module': PyModule,
'currentmodule': PyCurrentModule,
'decorator': PyDecoratorFunction,
'decoratormethod': PyDecoratorMethod,
}
roles = {
'data': PyXRefRole(),
'exc': PyXRefRole(),
'func': PyXRefRole(fix_parens=True),
'class': PyXRefRole(),
'const': PyXRefRole(),
'attr': PyXRefRole(),
'meth': PyXRefRole(fix_parens=True),
'mod': PyXRefRole(),
'obj': PyXRefRole(),
}
initial_data: Dict[str, Dict[str, Tuple[Any]]] = {
'objects': {}, # fullname -> docname, objtype
'modules': {}, # modname -> docname, synopsis, platform, deprecated
}
indices = [
PythonModuleIndex,
]
@property
def objects(self) -> Dict[str, ObjectEntry]:
return self.data.setdefault('objects', {}) # fullname -> ObjectEntry
def note_object(self, name: str, objtype: str, node_id: str,
aliased: bool = False, location: Any = None) -> None:
"""Note a python object for cross reference.
.. versionadded:: 2.1
"""
if name in self.objects:
other = self.objects[name]
if other.aliased and aliased is False:
# The original definition found. Override it!
pass
elif other.aliased is False and aliased:
# The original definition is already registered.
return
else:
# duplicated
logger.warning(__('duplicate object description of %s, '
'other instance in %s, use :noindex: for one of them'),
name, other.docname, location=location)
self.objects[name] = ObjectEntry(self.env.docname, node_id, objtype, aliased)
@property
def modules(self) -> Dict[str, ModuleEntry]:
return self.data.setdefault('modules', {}) # modname -> ModuleEntry
def note_module(self, name: str, node_id: str, synopsis: str,
platform: str, deprecated: bool) -> None:
"""Note a python module for cross reference.
.. versionadded:: 2.1
"""
self.modules[name] = ModuleEntry(self.env.docname, node_id,
synopsis, platform, deprecated)
def clear_doc(self, docname: str) -> None:
for fullname, obj in list(self.objects.items()):
if obj.docname == docname:
del self.objects[fullname]
for modname, mod in list(self.modules.items()):
if mod.docname == docname:
del self.modules[modname]
def merge_domaindata(self, docnames: List[str], otherdata: Dict) -> None:
# XXX check duplicates?
for fullname, obj in otherdata['objects'].items():
if obj.docname in docnames:
self.objects[fullname] = obj
for modname, mod in otherdata['modules'].items():
if mod.docname in docnames:
self.modules[modname] = mod
def find_obj(self, env: BuildEnvironment, modname: str, classname: str,
name: str, type: str, searchmode: int = 0
) -> List[Tuple[str, ObjectEntry]]:
"""Find a Python object for "name", perhaps using the given module
and/or classname. Returns a list of (name, object entry) tuples.
"""
# skip parens
if name[-2:] == '()':
name = name[:-2]
if not name:
return []
matches: List[Tuple[str, ObjectEntry]] = []
newname = None
if searchmode == 1:
if type is None:
objtypes = list(self.object_types)
else:
objtypes = self.objtypes_for_role(type)
if objtypes is not None:
if modname and classname:
fullname = modname + '.' + classname + '.' + name
if fullname in self.objects and self.objects[fullname].objtype in objtypes:
newname = fullname
if not newname:
if modname and modname + '.' + name in self.objects and \
self.objects[modname + '.' + name].objtype in objtypes:
newname = modname + '.' + name
elif name in self.objects and self.objects[name].objtype in objtypes:
newname = name
else:
# "fuzzy" searching mode
searchname = '.' + name
matches = [(oname, self.objects[oname]) for oname in self.objects
if oname.endswith(searchname) and
self.objects[oname].objtype in objtypes]
else:
# NOTE: searching for exact match, object type is not considered
if name in self.objects:
newname = name
elif type == 'mod':
# only exact matches allowed for modules
return []
elif classname and classname + '.' + name in self.objects:
newname = classname + '.' + name
elif modname and modname + '.' + name in self.objects:
newname = modname + '.' + name
elif modname and classname and \
modname + '.' + classname + '.' + name in self.objects:
newname = modname + '.' + classname + '.' + name
if newname is not None:
matches.append((newname, self.objects[newname]))
return matches
def resolve_xref(self, env: BuildEnvironment, fromdocname: str, builder: Builder,
type: str, target: str, node: pending_xref, contnode: Element
) -> Optional[Element]:
modname = node.get('py:module')
clsname = node.get('py:class')
searchmode = 1 if node.hasattr('refspecific') else 0
matches = self.find_obj(env, modname, clsname, target,
type, searchmode)
if not matches and type == 'attr':
# fallback to meth (for property; Sphinx-2.4.x)
# this ensures that the `:attr:` role continues to refer to the old property entry
# that was defined by the ``method`` directive in old reST files.
matches = self.find_obj(env, modname, clsname, target, 'meth', searchmode)
if not matches and type == 'meth':
# fallback to attr (for property)
# this ensures that `:meth:` in the old reST files can refer to the property
# entry that was defined by the ``property`` directive.
#
# Note: _prop is a secret role only for internal look-up.
matches = self.find_obj(env, modname, clsname, target, '_prop', searchmode)
if not matches:
return None
elif len(matches) > 1:
canonicals = [m for m in matches if not m[1].aliased]
if len(canonicals) == 1:
matches = canonicals
else:
logger.warning(__('more than one target found for cross-reference %r: %s'),
target, ', '.join(match[0] for match in matches),
type='ref', subtype='python', location=node)
name, obj = matches[0]
if obj[2] == 'module':
return self._make_module_refnode(builder, fromdocname, name, contnode)
else:
# determine the content of the reference by conditions
content = find_pending_xref_condition(node, 'resolved')
if content:
children = content.children
else:
# if not found, use contnode
children = [contnode]
return make_refnode(builder, fromdocname, obj[0], obj[1], children, name)
def resolve_any_xref(self, env: BuildEnvironment, fromdocname: str, builder: Builder,
target: str, node: pending_xref, contnode: Element
) -> List[Tuple[str, Element]]:
modname = node.get('py:module')
clsname = node.get('py:class')
results: List[Tuple[str, Element]] = []
# always search in "refspecific" mode with the :any: role
matches = self.find_obj(env, modname, clsname, target, None, 1)
for name, obj in matches:
if obj[2] == 'module':
results.append(('py:mod',
self._make_module_refnode(builder, fromdocname,
name, contnode)))
else:
# determine the content of the reference by conditions
content = find_pending_xref_condition(node, 'resolved')
if content:
children = content.children
else:
# if not found, use contnode
children = [contnode]
results.append(('py:' + self.role_for_objtype(obj[2]),
make_refnode(builder, fromdocname, obj[0], obj[1],
children, name)))
return results
def _make_module_refnode(self, builder: Builder, fromdocname: str, name: str,
contnode: Node) -> Element:
# get additional info for modules
module = self.modules[name]
title = name
if module.synopsis:
title += ': ' + module.synopsis
if module.deprecated:
title += _(' (deprecated)')
if module.platform:
title += ' (' + module.platform + ')'
return make_refnode(builder, fromdocname, module.docname, module.node_id,
contnode, title)
def get_objects(self) -> Iterator[Tuple[str, str, str, str, str, int]]:
for modname, mod in self.modules.items():
yield (modname, modname, 'module', mod.docname, mod.node_id, 0)
for refname, obj in self.objects.items():
if obj.objtype != 'module': # modules are already handled
if obj.aliased:
# aliased names are not full-text searchable.
yield (refname, refname, obj.objtype, obj.docname, obj.node_id, -1)
else:
yield (refname, refname, obj.objtype, obj.docname, obj.node_id, 1)
def get_full_qualified_name(self, node: Element) -> Optional[str]:
modname = node.get('py:module')
clsname = node.get('py:class')
target = node.get('reftarget')
if target is None:
return None
else:
return '.'.join(filter(None, [modname, clsname, target]))
def builtin_resolver(app: Sphinx, env: BuildEnvironment,
node: pending_xref, contnode: Element) -> Element:
"""Do not emit nitpicky warnings for built-in types."""
def istyping(s: str) -> bool:
if s.startswith('typing.'):
s = s.split('.', 1)[1]
return s in typing.__all__ # type: ignore
if node.get('refdomain') != 'py':
return None
elif node.get('reftype') in ('class', 'obj') and node.get('reftarget') == 'None':
return contnode
elif node.get('reftype') in ('class', 'exc'):
reftarget = node.get('reftarget')
if inspect.isclass(getattr(builtins, reftarget, None)):
# built-in class
return contnode
elif istyping(reftarget):
# typing class
return contnode
return None
def setup(app: Sphinx) -> Dict[str, Any]:
app.setup_extension('sphinx.directives')
app.add_domain(PythonDomain)
app.add_config_value('python_use_unqualified_type_names', False, 'env')
app.connect('object-description-transform', filter_meta_fields)
app.connect('missing-reference', builtin_resolver, priority=900)
return {
'version': 'builtin',
'env_version': 3,
'parallel_read_safe': True,
'parallel_write_safe': True,
}
|
py | 1a3bd68fa1c3bb0e1b8b2027c730bd1ac6f0b177 | """
In this file one can find the implementation of helpful class and functions in order to handle the given dataset, in the
aspect of its structure.
Here is the implementation of helpful class and functions that handle the given dataset.
"""
import json
import csv
from scipy.stats import zscore
from torch import Tensor
from torch.nn import ConstantPad2d
from torch.utils.data import Dataset, DataLoader
from collections import Counter
from feature_calculators import FeatureMeta
from features_processor import FeaturesProcessor, log_norm
from graph_features import GraphFeatures
from loggers import PrintLogger
from multi_graph import MultiGraph
from dataset.dataset_external_data import ExternalData
import os
import pandas as pd
import networkx as nx
import pickle
import numpy as np
from vertices.betweenness_centrality import BetweennessCentralityCalculator
from vertices.bfs_moments import BfsMomentsCalculator
from sklearn.preprocessing import MinMaxScaler
# some useful constants and shorthand names
PKL_DIR = "pkl"
NORM_REDUCED = "NORM_REDUCED"
NORM_REDUCED_SYMMETRIC = "NORM_REDUCED_SYMMETRIC"
IDENTITY = "IDENTITY"
RAW_FORM = "RAW_FORM"
DEG = "DEG"
IN_DEG = "IN_DEG"
OUT_DEG = "OUT_DEG"
CENTRALITY = ("betweenness_centrality", FeatureMeta(BetweennessCentralityCalculator, {"betweenness"}))
BFS = ("bfs_moments", FeatureMeta(BfsMomentsCalculator, {"bfs"}))
class GraphsDataset(Dataset):
def __init__(self, params, external_data: ExternalData = None):
# load the params file (json) in the "graphs_data" section.
self._params = params if type(params) is dict else json.load(open(params, "rt"))
self._dataset_name = self._params["dataset_name"]
self._params = self._params["graphs_data"]
self._logger = PrintLogger("logger")
# path to base directory
self._base_dir = __file__.replace("/", os.sep)
self._base_dir = os.path.join(self._base_dir.rsplit(os.sep, 1)[0], "..")
self._external_data = external_data
# init ftr_meta dictionary and other ftr attributes
self._init_ftrs()
self._src_file_path = os.path.join(self._params["file_path"])
self._multi_graph, self._labels, self._label_to_idx, self._idx_to_label = self._build_multi_graph()
self._data, self._idx_to_name = self._build_data()
@property
def all_labels(self):
return self._idx_to_label
@property
def label_count(self):
return Counter([v[3] for name, v in self._data.items()])
def label(self, idx):
return self._data[self._idx_to_name[idx]][3]
@property
def len_features(self):
return self._data[self._idx_to_name[0]][1].shape[1]
# Initialization of the requested features
def _init_ftrs(self):
self._deg, self._in_deg, self._out_deg, self._is_ftr, self._ftr_meta = False, False, False, False, {}
self._is_external_data = False if self._external_data is None else True
# params["features"] contains strings and two-element lists (matching key: value)
# decide whether Deg/In-Deg/Out-Deg should be calculated
for ftr in self._params["features"]:
ftr = globals()[ftr]
if ftr == DEG:
self._deg = True
elif ftr == IN_DEG:
self._in_deg = True
elif ftr == OUT_DEG:
self._out_deg = True
else:
self._ftr_meta[ftr[0]] = ftr[1]
# add directories for pickles
if len(self._ftr_meta) > 0:
self._ftr_path = os.path.join(self._base_dir, PKL_DIR, "ftr", self._dataset_name)
if not os.path.exists(self._ftr_path):
os.mkdir(self._ftr_path)
# True when there are features other than degrees, such as betweenness
self._is_ftr = True
"""
build a multi-graph from the csv
each community is a single graph; time is not taken into account
"""
def _build_multi_graph(self):
# percentage is the "amount" of the graph we take. For example, percentage=1 means the whole graph is taken,
# percentage=0.6 means 60% of the graph is taken , ....
path_pkl = os.path.join(self._base_dir, PKL_DIR, self._dataset_name + "_split_" +
str(self._params["percentage"]) + "_mg.pkl")
# the path where the pickle is stored; if it exists, the graph has already been built, so we
# load the pickle and return it
if os.path.exists(path_pkl):
return pickle.load(open(path_pkl, "rb"))
multi_graph_dict = {}
labels = {}
label_to_idx = {}
# open basic data csv (with all edges of all times)
data_df = pd.read_csv(self._src_file_path)
stop = data_df.shape[0] * self._params["percentage"]
for index, edge in data_df.iterrows():
if index > stop:
break
# write edge to dictionary
graph_id = str(edge[self._params["graph_col"]])
src = str(edge[self._params["src_col"]])
dst = str(edge[self._params["dst_col"]])
multi_graph_dict[graph_id] = multi_graph_dict.get(graph_id, []) + [(src, dst)]
label = edge[self._params["label_col"]]
label_to_idx[label] = len(label_to_idx) if label not in label_to_idx else label_to_idx[label]
labels[graph_id] = label_to_idx[label]
mg = MultiGraph(self._dataset_name, graphs_source=multi_graph_dict,
directed=self._params["directed"], logger=self._logger)
idx_to_label = [l for l in sorted(label_to_idx, key=lambda x: label_to_idx[x])]
mg.suspend_logger()
# make directories
os.makedirs(os.path.join(self._base_dir, PKL_DIR), exist_ok=True)
pickle.dump((mg, labels, label_to_idx, idx_to_label), open(path_pkl, "wb"))
mg.wake_logger()
return mg, labels, label_to_idx, idx_to_label
"""
returns a vector x for gnx
basic version returns degree for each node
"""
def _gnx_vec(self, gnx_id, gnx: nx.Graph, node_order):
# final vector that will have matrices of features
final_vec = []
# calculate degree for each node
if self._deg:
degrees = gnx.degree(gnx.nodes)
final_vec.append(np.matrix([np.log(degrees[d] + 1e-3) for d in node_order]).T)
# calculate in degree for each node
if self._in_deg:
degrees = gnx.in_degree(gnx.nodes)
final_vec.append(np.matrix([np.log(degrees[d] + 1e-3) for d in node_order]).T)
# calculate out degree for each node
if self._out_deg:
degrees = gnx.out_degree(gnx.nodes)
final_vec.append(np.matrix([np.log(degrees[d] + 1e-3) for d in node_order]).T)
# if external data is given, add its feature too
if self._is_external_data and self._external_data.is_continuous:
final_vec.append(np.matrix([self._external_data.continuous_feature(gnx_id, d) for d in node_order]))
# if there are more features besides degrees and external ones, such as betweenness.
if self._is_ftr:
name = str(gnx_id)
# create a path if it does not exist yet
gnx_dir_path = os.path.join(self._ftr_path, name)
if not os.path.exists(gnx_dir_path):
os.mkdir(gnx_dir_path)
# Graph Feature is a class from the package "graph features" which calculates the given features
raw_ftr = GraphFeatures(gnx, self._ftr_meta, dir_path=gnx_dir_path, is_max_connected=False,
logger=PrintLogger("logger"))
raw_ftr.build(should_dump=True) # build features
final_vec.append(FeaturesProcessor(raw_ftr).as_matrix(norm_func=log_norm))
# all feature matrices are stacked horizontally to create a single matrix of features
return np.hstack(final_vec)
# calculate degree matrix
def _degree_matrix(self, gnx, nodelist):
degrees = gnx.degree(gnx.nodes)
return np.diag([degrees[d] for d in nodelist])
# standardize the data with z-score, min-max, etc.
def _standardize_data(self, data):
all_data_continuous_vec = [] # stack all vectors for all graphs
key_to_idx_map = [] # keep ordered list (g_id, num_nodes) according to stack order
# stack
for g_id, (A, gnx_vec, embed_vec, label) in data.items():
all_data_continuous_vec.append(gnx_vec)
key_to_idx_map.append((g_id, gnx_vec.shape[0])) # g_id, number of nodes ... ordered
all_data_continuous_vec = np.vstack(all_data_continuous_vec)
# z-score data
if self._params["standardization"] == "zscore":
standardized_data = zscore(all_data_continuous_vec, axis=0)
# scale data (not implemented yet; fall back to the raw vectors so later indexing does not fail)
elif self._params["standardization"] == "scale":
standardized_data = all_data_continuous_vec
# min-max data
elif self._params["standardization"] == "min_max":
scalar = MinMaxScaler()
standardized_data = scalar.fit_transform(all_data_continuous_vec)
# rebuild data to original form -> split stacked matrix according to <list: (g_id, num_nodes)>
new_data_dict = {}
start_idx = 0
for g_id, num_nodes in key_to_idx_map:
new_data_dict[g_id] = (data[g_id][0], standardized_data[start_idx: start_idx+num_nodes],
data[g_id][2], data[g_id][3])
start_idx += num_nodes
return new_data_dict
# For the GCN the adjacency matrix needs to be normalized
def _norm_adjacency(self, A, gnx, node_order):
if self._params["adjacency_norm"] == NORM_REDUCED:
# D^-0.5 A D^-0.5
D = self._degree_matrix(gnx, nodelist=node_order)
D_sqrt = np.matrix(np.sqrt(D))
adjacency = D_sqrt * np.matrix(A) * D_sqrt
elif self._params["adjacency_norm"] == NORM_REDUCED_SYMMETRIC:
# D^-0.5 [A + A.T + I] D^-0.5
D = self._degree_matrix(gnx, nodelist=node_order)
D_sqrt = np.matrix(np.sqrt(D))
adjacency = D_sqrt * np.matrix(A + A.T + np.identity(A.shape[0])) * D_sqrt
elif self._params["adjacency_norm"] == IDENTITY:
# identity matrix instead of adjacency matrix
adjacency = np.identity(A.shape[0])
elif self._params["adjacency_norm"] == RAW_FORM:
# don't do any normalization
adjacency = A
else:
print("Error in adjacency_norm: " + self._params["adjacency_norm"] + "is not a valid option")
exit(1)
return adjacency
"""
builds a data dictionary
{ ... graph_name: ( A = Adjacency_matrix, x = graph_vec, label ) ... }
We use all the above functions to finally build the whole data model
"""
def _build_data(self):
ext_data_id = "None" if not self._is_external_data else "_embed_ftr_" + "_".join(self._external_data.embed_headers)\
+ "_continuous_ftr_" + "_".join(self._external_data.continuous_headers) \
+ "standardization_" + self._params["standardization"]
pkl_path = os.path.join(self._base_dir, PKL_DIR, self._dataset_name + ext_data_id + "_data.pkl")
if os.path.exists(pkl_path):
return pickle.load(open(pkl_path, "rb"))
data = {}
idx_to_name = []
for gnx_id, gnx in zip(self._multi_graph.graph_names(), self._multi_graph.graphs()):
# if gnx.number_of_nodes() < 5:
# continue
node_order = list(gnx.nodes)
idx_to_name.append(gnx_id)
adjacency = self._norm_adjacency(nx.adjacency_matrix(gnx, nodelist=node_order).todense(), gnx, node_order)
gnx_vec = self._gnx_vec(gnx_id, gnx, node_order)
embed_vec = [self._external_data.embed_feature(gnx_id, d) for d in node_order] \
if self._is_external_data and self._external_data.is_embed else None
data[gnx_id] = (adjacency, gnx_vec, embed_vec, self._labels[gnx_id])
data = self._standardize_data(data)
pickle.dump((data, idx_to_name), open(pkl_path, "wb"))
return data, idx_to_name
def collate_fn(self, batch):
lengths_sequences = []
# collect the number of nodes of each graph in the batch
for A, x, e, l in batch:
lengths_sequences.append(A.shape[0])
# the maximum size is needed in order to pad the whole batch to a single dimension
seq_max_len = np.max(lengths_sequences)
# new batch variables
adjacency_batch = []
x_batch = []
embeddings_batch = []
labels_batch = []
for A, x, e, l in batch:
# pad the adjacency and feature matrices to the largest graph in the batch
adjacency_pad = ConstantPad2d((0, seq_max_len - A.shape[0], 0, seq_max_len - A.shape[0]), 0)
adjacency_batch.append(adjacency_pad(A).tolist())
vec_pad = ConstantPad2d((0, 0, 0, seq_max_len - A.shape[0]), 0)
x_batch.append(vec_pad(x).tolist())
embeddings_batch.append(vec_pad(e).tolist() if self._is_external_data and self._external_data.is_embed else e)
labels_batch.append(l)
return Tensor(adjacency_batch), Tensor(x_batch), Tensor(embeddings_batch).long(), Tensor(labels_batch).long()
def __getitem__(self, index):
gnx_id = self._idx_to_name[index]
A, x, embed, label = self._data[gnx_id]
embed = 0 if embed is None else Tensor(embed).long()
return Tensor(A), Tensor(x), embed, label
def __len__(self):
return len(self._idx_to_name)
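# Hedged usage sketch (the params path and batch size are hypothetical): the dataset
# is meant to be wrapped in a DataLoader together with its own collate_fn so that
# variable-size graphs are padded per batch, e.g.
#
#   ds = GraphsDataset("params/default_params.json", external_data=None)
#   loader = DataLoader(ds, batch_size=32, shuffle=True, collate_fn=ds.collate_fn)
#   for A, x, embed, labels in loader:
#       pass  # A: padded adjacency tensors, x: padded node-feature tensors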
|
py | 1a3bd72a33ee92fb3e1fba3e6eee6486106386fb | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import functools
import hashlib
import time
import warnings
from datetime import timedelta
from typing import Any, Callable, Iterable, Optional, Union
from airflow import settings
from airflow.configuration import conf
from airflow.exceptions import (
AirflowException,
AirflowRescheduleException,
AirflowSensorTimeout,
AirflowSkipException,
)
from airflow.models.baseoperator import BaseOperator
from airflow.models.sensorinstance import SensorInstance
from airflow.models.skipmixin import SkipMixin
from airflow.models.taskreschedule import TaskReschedule
from airflow.ti_deps.deps.ready_to_reschedule import ReadyToRescheduleDep
from airflow.utils import timezone
from airflow.utils.context import Context
# We need to keep the import here because GCSToLocalFilesystemOperator released in
# Google Provider before 3.0.0 imported apply_defaults from here.
# See https://github.com/apache/airflow/issues/16035
from airflow.utils.decorators import apply_defaults # noqa: F401
from airflow.utils.docs import get_docs_url
# As documented in https://dev.mysql.com/doc/refman/5.7/en/datetime.html.
_MYSQL_TIMESTAMP_MAX = datetime.datetime(2038, 1, 19, 3, 14, 7, tzinfo=timezone.utc)
@functools.lru_cache(maxsize=None)
def _is_metadatabase_mysql() -> bool:
if settings.engine is None:
raise AirflowException("Must initialize ORM first")
return settings.engine.url.get_backend_name() == "mysql"
class PokeReturnValue:
"""
Sensors can optionally return an instance of the PokeReturnValue class in the poke method.
If an XCom value is supplied when the sensor is done, then the XCom value will be
pushed through the operator return value.
:param is_done: Set to true to indicate the sensor can stop poking.
:param xcom_value: An optional XCOM value to be returned by the operator.
"""
def __init__(self, is_done: bool, xcom_value: Optional[Any] = None) -> None:
self.xcom_value = xcom_value
self.is_done = is_done
def __bool__(self) -> bool:
return self.is_done
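# Illustrative example (hypothetical value): a sensor's poke() may return
# ``PokeReturnValue(is_done=True, xcom_value=record_count)`` so that the sensor both
# signals completion and pushes ``record_count`` as the task's XCom return value.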
class BaseSensorOperator(BaseOperator, SkipMixin):
"""
Sensor operators are derived from this class and inherit these attributes.
Sensor operators keep executing at a time interval and succeed when
a criteria is met and fail if and when they time out.
:param soft_fail: Set to true to mark the task as SKIPPED on failure
:param poke_interval: Time in seconds that the job should wait in
between each tries
:param timeout: Time, in seconds before the task times out and fails.
:param mode: How the sensor operates.
Options are: ``{ poke | reschedule }``, default is ``poke``.
When set to ``poke`` the sensor is taking up a worker slot for its
whole execution time and sleeps between pokes. Use this mode if the
expected runtime of the sensor is short or if a short poke interval
is required. Note that the sensor will hold onto a worker slot and
a pool slot for the duration of the sensor's runtime in this mode.
When set to ``reschedule`` the sensor task frees the worker slot when
the criteria is not yet met and it's rescheduled at a later time. Use
this mode if the time before the criteria is met is expected to be
quite long. The poke interval should be more than one minute to
prevent too much load on the scheduler.
:param exponential_backoff: allow progressive longer waits between
pokes by using exponential backoff algorithm
"""
ui_color = '#e6f1f2' # type: str
valid_modes = ['poke', 'reschedule'] # type: Iterable[str]
# As the poke context in smart sensor defines the poking job signature only,
# The execution_fields defines other execution details
# for this tasks such as the customer defined timeout, the email and the alert
# setup. Smart sensor serialize these attributes into a different DB column so
# that smart sensor service is able to handle corresponding execution details
# without breaking the sensor poking logic with dedup.
execution_fields = (
'poke_interval',
'retries',
'execution_timeout',
'timeout',
'email',
'email_on_retry',
'email_on_failure',
)
# Adds one additional dependency for all sensor operators that checks if a
# sensor task instance can be rescheduled.
deps = BaseOperator.deps | {ReadyToRescheduleDep()}
def __init__(
self,
*,
poke_interval: float = 60,
timeout: float = conf.getfloat('sensors', 'default_timeout'),
soft_fail: bool = False,
mode: str = 'poke',
exponential_backoff: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.poke_interval = poke_interval
self.soft_fail = soft_fail
self.timeout = timeout
self.mode = mode
self.exponential_backoff = exponential_backoff
self._validate_input_values()
self.sensor_service_enabled = conf.getboolean('smart_sensor', 'use_smart_sensor')
self.sensors_support_sensor_service = set(
map(lambda l: l.strip(), conf.get('smart_sensor', 'sensors_enabled').split(','))
)
def _validate_input_values(self) -> None:
if not isinstance(self.poke_interval, (int, float)) or self.poke_interval < 0:
raise AirflowException("The poke_interval must be a non-negative number")
if not isinstance(self.timeout, (int, float)) or self.timeout < 0:
raise AirflowException("The timeout must be a non-negative number")
if self.mode not in self.valid_modes:
raise AirflowException(
f"The mode must be one of {self.valid_modes},'{self.dag.dag_id if self.has_dag() else ''} "
f".{self.task_id}'; received '{self.mode}'."
)
# Sanity check that poke_interval isn't immediately over MySQL's TIMESTAMP limit.
# This check is only rudimentary to catch trivial user errors, e.g. mistakenly
# set the value to milliseconds instead of seconds. There's another check when
# we actually try to reschedule to ensure database sanity.
if self.reschedule and _is_metadatabase_mysql():
if timezone.utcnow() + datetime.timedelta(seconds=self.poke_interval) > _MYSQL_TIMESTAMP_MAX:
raise AirflowException(
f"Cannot set poke_interval to {self.poke_interval} seconds in reschedule "
f"mode since it will take reschedule time over MySQL's TIMESTAMP limit."
)
def poke(self, context: Context) -> Union[bool, PokeReturnValue]:
"""
Function that the sensors defined while deriving this class should
override.
"""
raise AirflowException('Override me.')
def is_smart_sensor_compatible(self):
check_list = [
not self.sensor_service_enabled,
self.on_success_callback,
self.on_retry_callback,
self.on_failure_callback,
]
if any(check_list):
return False
operator = self.__class__.__name__
return operator in self.sensors_support_sensor_service
def register_in_sensor_service(self, ti, context):
"""
Register ti in smart sensor service
:param ti: Task instance object.
:param context: TaskInstance template context from the ti.
:return: boolean
"""
docs_url = get_docs_url('concepts/smart-sensors.html#migrating-to-deferrable-operators')
warnings.warn(
'Your sensor is using Smart Sensors, which are deprecated.'
f' Please use Deferrable Operators instead. See {docs_url} for more info.',
DeprecationWarning,
)
poke_context = self.get_poke_context(context)
execution_context = self.get_execution_context(context)
return SensorInstance.register(ti, poke_context, execution_context)
def get_poke_context(self, context):
"""
Return a dictionary with all attributes in poke_context_fields. The
poke_context with operator class can be used to identify a unique
sensor job.
:param context: TaskInstance template context.
:return: A dictionary with key in poke_context_fields.
"""
if not context:
self.log.info("Function get_poke_context doesn't have a context input.")
poke_context_fields = getattr(self.__class__, "poke_context_fields", None)
result = {key: getattr(self, key, None) for key in poke_context_fields}
return result
def get_execution_context(self, context):
"""
Return a dictionary with all attributes in execution_fields. The
execution_context include execution requirement for each sensor task
such as timeout setup, email_alert setup.
:param context: TaskInstance template context.
:return: A dictionary with key in execution_fields.
"""
if not context:
self.log.info("Function get_execution_context doesn't have a context input.")
execution_fields = self.__class__.execution_fields
result = {key: getattr(self, key, None) for key in execution_fields}
if result['execution_timeout'] and isinstance(result['execution_timeout'], datetime.timedelta):
result['execution_timeout'] = result['execution_timeout'].total_seconds()
return result
def execute(self, context: Context) -> Any:
started_at: Union[datetime.datetime, float]
if self.reschedule:
# If reschedule, use the start date of the first try (first try can be either the very
# first execution of the task, or the first execution after the task was cleared.)
first_try_number = context['ti'].max_tries - self.retries + 1
task_reschedules = TaskReschedule.find_for_task_instance(
context['ti'], try_number=first_try_number
)
if not task_reschedules:
start_date = timezone.utcnow()
else:
start_date = task_reschedules[0].start_date
started_at = start_date
def run_duration() -> float:
# If we are in reschedule mode, then we have to compute diff
# based on the time in a DB, so can't use time.monotonic
return (timezone.utcnow() - start_date).total_seconds()
else:
started_at = start_monotonic = time.monotonic()
def run_duration() -> float:
return time.monotonic() - start_monotonic
try_number = 1
log_dag_id = self.dag.dag_id if self.has_dag() else ""
xcom_value = None
while True:
poke_return = self.poke(context)
if poke_return:
if isinstance(poke_return, PokeReturnValue):
xcom_value = poke_return.xcom_value
break
if run_duration() > self.timeout:
# If sensor is in soft fail mode but times out raise AirflowSkipException.
if self.soft_fail:
raise AirflowSkipException(f"Snap. Time is OUT. DAG id: {log_dag_id}")
else:
raise AirflowSensorTimeout(f"Snap. Time is OUT. DAG id: {log_dag_id}")
if self.reschedule:
next_poke_interval = self._get_next_poke_interval(started_at, run_duration, try_number)
reschedule_date = timezone.utcnow() + timedelta(seconds=next_poke_interval)
if _is_metadatabase_mysql() and reschedule_date > _MYSQL_TIMESTAMP_MAX:
raise AirflowSensorTimeout(
f"Cannot reschedule DAG {log_dag_id} to {reschedule_date.isoformat()} "
f"since it is over MySQL's TIMESTAMP storage limit."
)
raise AirflowRescheduleException(reschedule_date)
else:
time.sleep(self._get_next_poke_interval(started_at, run_duration, try_number))
try_number += 1
self.log.info("Success criteria met. Exiting.")
return xcom_value
def _get_next_poke_interval(
self,
started_at: Union[datetime.datetime, float],
run_duration: Callable[[], float],
try_number: int,
) -> float:
"""Using the similar logic which is used for exponential backoff retry delay for operators."""
if not self.exponential_backoff:
return self.poke_interval
min_backoff = int(self.poke_interval * (2 ** (try_number - 2)))
run_hash = int(
hashlib.sha1(f"{self.dag_id}#{self.task_id}#{started_at}#{try_number}".encode()).hexdigest(),
16,
)
modded_hash = min_backoff + run_hash % min_backoff
delay_backoff_in_seconds = min(modded_hash, timedelta.max.total_seconds() - 1)
new_interval = min(self.timeout - int(run_duration()), delay_backoff_in_seconds)
self.log.info("new %s interval is %s", self.mode, new_interval)
return new_interval
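# Worked illustration of the backoff above (values are only indicative): with
# poke_interval=60 and exponential_backoff enabled, min_backoff grows as 30, 60,
# 120, 240, ... for try_number 1, 2, 3, 4, and the hash-based jitter keeps the
# actual delay between min_backoff and 2 * min_backoff, capped by the remaining
# timeout.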
def prepare_for_execution(self) -> BaseOperator:
task = super().prepare_for_execution()
# Sensors in `poke` mode can block execution of DAGs when running
# with a single-process executor, thus we change the mode to `reschedule`
# to allow parallel tasks to be scheduled and executed
if conf.get('core', 'executor') == "DebugExecutor":
self.log.warning("DebugExecutor changes sensor mode to 'reschedule'.")
task.mode = 'reschedule'
return task
@property
def reschedule(self):
"""Define mode rescheduled sensors."""
return self.mode == 'reschedule'
def poke_mode_only(cls):
"""
Class Decorator for child classes of BaseSensorOperator to indicate
that instances of this class are only safe to use poke mode.
Will decorate all methods in the class to assert they did not change
the mode from 'poke'.
:param cls: BaseSensor class to enforce methods only use 'poke' mode.
"""
def decorate(cls_type):
def mode_getter(_):
return 'poke'
def mode_setter(_, value):
if value != 'poke':
raise ValueError("cannot set mode to anything other than 'poke'.")
if not issubclass(cls_type, BaseSensorOperator):
raise ValueError(
f"poke_mode_only decorator should only be "
f"applied to subclasses of BaseSensorOperator,"
f" got:{cls_type}."
)
cls_type.mode = property(mode_getter, mode_setter)
return cls_type
return decorate(cls)
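# Hedged usage sketch (hypothetical sensor class):
#
#   @poke_mode_only
#   class ExampleQueueSensor(BaseSensorOperator):
#       def poke(self, context):
#           ...
#
# any later attempt to set ``mode = 'reschedule'`` on such a class raises ValueError.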
|
py | 1a3bd8a31c57e41b4710edf917d4f0bdf04764ff | # -*- coding: utf-8 -*-
# Copyright (c) 2017, Zefa Consulting and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class PAYETaxRebates(Document):
pass
|
py | 1a3bd8cac582ffbe7357053cac8c2b32f5e52861 | from os.path import join
from os.path import exists
import torch
import argparse
import os
import torch.nn.functional as F
import models
from evaluation.PerceptualSimilarity.models import PerceptualLoss
from evaluation.PerceptualSimilarity.util import util
import glob
import pickle
import numpy as np
def plot_vid(vids, boxes_gt=None, boxes_pred=None):
vids = vids.cpu().numpy()
vids = np.transpose(vids, [0, 2, 3, 1])
output_imgs = []
for i in range(0, vids.shape[0], 1):
img = np.clip((vids[i] * np.array([0.229, 0.224, 0.225]) + np.array([0.485, 0.456, 0.406])) * 255, 0,
255).astype('uint8').copy()
normalized_img = util.im2tensor(img) # RGB image from [-1,1]
# normalized_img = F.interpolate(normalized_img, size=64)
output_imgs.append(normalized_img)
return torch.cat(output_imgs)
def get_video_from_pkl(ff):
video_tensor = ff['image']
# Remove the first batch dim if exits
if len(video_tensor.size()) == 5:
video_tensor = video_tensor.squeeze()
video = plot_vid(video_tensor)
return video
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-d0', '--dir0', type=str, default='./imgs/ex_dir0')
parser.add_argument('-d1', '--dir1', type=str, default='./imgs/ex_dir1')
parser.add_argument('-o', '--out', type=str, default='./imgs/example_dists.txt')
parser.add_argument('--use_gpu', action='store_true', help='turn on flag to use GPU')
opt = parser.parse_args()
## Initializing the model
model = PerceptualLoss(model='net-lin', net='alex', use_gpu=opt.use_gpu)
# crawl directories
files = glob.glob(opt.dir0 + '/*.pkl')
videos = [os.path.basename(fl) for fl in files]
res = []
for vid in videos:
if exists(join(opt.dir1, vid)):
# Load pickles
f0 = pickle.load(open(join(opt.dir0, vid), 'rb'))
f1 = pickle.load(open(join(opt.dir1, vid), 'rb'))
img0 = get_video_from_pkl(f0) # RGB images from [-1,1]
img1 = get_video_from_pkl(f1)
# Load images
# img0 = util.im2tensor(util.load_image(os.path.join(opt.dir0, folder, file)))
# img1 = util.im2tensor(util.load_image(os.path.join(opt.dir1, folder, file)))
if (opt.use_gpu):
img0 = img0.cuda()
img1 = img1.cuda()
# Compute distance
dist01 = model.forward(img0, img1)
# print('%s: %.3f' % (file, dist01))
res.append(dist01.mean())
# Save
np.save(opt.out, torch.stack(res).data.cpu().numpy())
mean = torch.mean(torch.stack(res))
std = torch.std(torch.stack(res))
print("Diversity: {}±{}".format(mean, std))
|
py | 1a3bdaf7fc2d64965dc660387b72924bd66e0793 | from decimal import Decimal
from helpers import *
import requests
TICKER_URL = 'https://cex.io/api/ticker/BTC/USD'
def get_current_price():
data = get_response(TICKER_URL)
price = data['last']
return Decimal(price)
def get_current_bid():
data = get_response(TICKER_URL)
price = data['bid']
return Decimal(price)
def get_current_ask():
data = get_response(TICKER_URL)
price = data['ask']
return Decimal(price) |
py | 1a3bdb2942ee1cd622296823b73787d791b2a731 | """
Define the NonlinearRunOnce class.
This is a simple nonlinear solver that just runs the system once.
"""
from openmdao.recorders.recording_iteration_stack import Recording
from openmdao.solvers.solver import NonlinearSolver
from openmdao.utils.general_utils import warn_deprecation
from openmdao.utils.mpi import multi_proc_fail_check
class NonlinearRunOnce(NonlinearSolver):
"""
Simple solver that runs the containing system once.
This is done without iteration or norm calculation.
"""
SOLVER = 'NL: RUNONCE'
def solve(self):
"""
Run the solver.
"""
system = self._system
with Recording('NLRunOnce', 0, self) as rec:
# If this is a parallel group, transfer all at once then run each subsystem.
if len(system._subsystems_myproc) != len(system._subsystems_allprocs):
system._transfer('nonlinear', 'fwd')
with multi_proc_fail_check(system.comm):
for subsys in system._subsystems_myproc:
subsys._solve_nonlinear()
system._check_child_reconf()
# If this is not a parallel group, transfer for each subsystem just prior to running it.
else:
self._gs_iter()
rec.abs = 0.0
rec.rel = 0.0
def _declare_options(self):
"""
Declare options before kwargs are processed in the init method.
"""
# Remove unused options from base options here, so that users
# attempting to set them will get KeyErrors.
self.options.undeclare("atol")
self.options.undeclare("rtol")
# this solver does not iterate
self.options.undeclare("maxiter")
self.options.undeclare("err_on_maxiter") # Deprecated option.
self.options.undeclare("err_on_non_converge")
class NonLinearRunOnce(NonlinearRunOnce):
"""
Deprecated. See NonlinearRunOnce.
"""
def __init__(self, *args, **kwargs):
"""
Deprecated.
Parameters
----------
*args : list of object
Positional args.
**kwargs : dict
Named args.
"""
super(NonLinearRunOnce, self).__init__(*args, **kwargs)
warn_deprecation('NonLinearRunOnce is deprecated. Use NonlinearRunOnce instead.')
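# Typical usage sketch (model/group names are hypothetical): assign the solver
# to a group, e.g. `model.nonlinear_solver = NonlinearRunOnce()`, so the group's
# subsystems are executed once per solve instead of being iterated to convergence.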
|
py | 1a3bdb7e7be7d959ce843076c3cc595090208146 | # -*- coding: utf-8 -*-
#
# txThings asyncio branch documentation build configuration file, created by
# sphinx-quickstart on Wed Jun 4 09:40:16 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
# maybe required for readthedocs
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'aiocoap_index',
'sphinxarg.ext',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'aiocoap'
copyright = u'2014, Maciej Wasilak, Christian Amsüss'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.4'
# The full version, including alpha/beta/rc tags.
release = '0.4b2.post0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logo.svg'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logo-square.svg'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
# it's in most cases just autodoc text
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'aiocoap'
autodoc_member_order = 'bysource'
man_pages = [
('module/aiocoap.cli.client', 'aiocoap-client', 'query CoAP servers from the command line', '', 1),
('module/aiocoap.cli.proxy', 'aiocoap-proxy', 'forward and reverse proxy server for CoAP', '', 1),
('module/aiocoap.cli.rd', 'aiocoap-rd', 'Resource Directory server', '', 1),
('module/aiocoap.cli.fileserver', 'aiocoap-fileserver', 'File server for CoAP', '', 1),
]
|
py | 1a3bdeb0d19f8c987f9d2bd6a769fb8b7606a25e | """Treadmill install dependencies"""
import logging
import click
from treadmill import TREADMILL
from subprocess import call
_LOGGER = logging.getLogger(__name__)
def init():
"""Install treadmill dependencies"""
@click.command()
def pid1():
"""Install dependencies"""
call(TREADMILL + '/local/linux/scripts/install_pid1.sh')
return pid1
|
py | 1a3bdeb6b604bb46516e6907b5c80488f1d0c4b9 | import tempfile
import time
import os
import os.path
from ovos_utils.log import LOG
def get_ipc_directory(domain=None, config=None):
"""Get the directory used for Inter Process Communication
Files in this folder can be accessed by different processes on the
machine. Useful for communication. This is often a small RAM disk.
Args:
domain (str): The IPC domain. Basically a subdirectory to prevent
overlapping signal filenames.
config (dict): mycroft.conf, to read ipc directory from
Returns:
str: a path to the IPC directory
"""
if config is None:
from ovos_utils.configuration import read_mycroft_config
config = read_mycroft_config()
path = config.get("ipc_path")
if not path:
# If not defined, use /tmp/mycroft/ipc
path = os.path.join(tempfile.gettempdir(), "mycroft", "ipc")
return ensure_directory_exists(path, domain)
def ensure_directory_exists(directory, domain=None):
""" Create a directory and give access rights to all
Args:
domain (str): The IPC domain. Basically a subdirectory to prevent
overlapping signal filenames.
Returns:
str: a path to the directory
"""
if domain:
directory = os.path.join(directory, domain)
# Expand and normalize the path
directory = os.path.normpath(directory)
directory = os.path.expanduser(directory)
if not os.path.isdir(directory):
try:
save = os.umask(0)
os.makedirs(directory, 0o777) # give everyone rights to r/w here
except OSError:
LOG.warning("Failed to create: " + directory)
pass
finally:
os.umask(save)
return directory
def create_file(filename):
""" Create the file filename and create any directories needed
Args:
filename: Path to the file to be created
"""
try:
os.makedirs(os.path.dirname(filename))
except OSError:
pass
with open(filename, 'w') as f:
f.write('')
def create_signal(signal_name, config=None):
"""Create a named signal
Args:
signal_name (str): The signal's name. Must only contain characters
valid in filenames.
config (dict): mycroft.conf, to read ipc directory from
"""
try:
path = os.path.join(get_ipc_directory(config=config),
"signal", signal_name)
create_file(path)
return os.path.isfile(path)
except IOError:
return False
def check_for_signal(signal_name, sec_lifetime=0, config=None):
"""See if a named signal exists
Args:
signal_name (str): The signal's name. Must only contain characters
valid in filenames.
sec_lifetime (int, optional): How many seconds the signal should
remain valid. If 0 or not specified, it is a single-use signal.
If -1, it never expires.
config (dict): mycroft.conf, to read ipc directory from
Returns:
bool: True if the signal is defined, False otherwise
"""
path = os.path.join(get_ipc_directory(config=config),
"signal", signal_name)
if os.path.isfile(path):
if sec_lifetime == 0:
# consume this single-use signal
os.remove(path)
elif sec_lifetime == -1:
return True
elif int(os.path.getctime(path) + sec_lifetime) < int(time.time()):
# remove once expired
os.remove(path)
return False
return True
# No such signal exists
return False
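# Illustrative usage sketch (the signal name is hypothetical):
# create_signal("wifi.connected") writes an empty marker file under the IPC
# directory, and a later check_for_signal("wifi.connected") consumes it,
# returning True exactly once for a single-use (sec_lifetime=0) signal.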
|
py | 1a3bdee9cf1d2af084621c71e52796ea3266897c | """
Author: Zeliha Ural Merpez
Date: March,13 2021
"""
import requests
import json
import pandas as pd
from bs4 import BeautifulSoup
import altair as alt
import numpy as np
from math import sin, cos, sqrt, atan2, radians
import matplotlib.pyplot as plt
def get_keys(path):
with open(path) as f:
return json.load(f)
def get_address_google(name_list, API_Key, city_list=0):
placesAPI_data = pd.DataFrame(
columns=["name", "formatted_address", "geometry", "permanently_closed"]
) # initialize dataframe
if isinstance(name_list, str):
name_list = [name_list]
for i in range(len(name_list)):
if city_list == 0:
city = ""
else:
city = city_list[i]
name = (
name_list[i].replace(" ", "%20").replace("&", "%26")
) # make sure there are no blank spaces for the URL and deal with &
b = "!@#$()"
for char in b:
name = name.replace(char, "")
address_search = name + ",%20" + city
url = (
"https://maps.googleapis.com/maps/api/place/findplacefromtext/json?input="
+ address_search
+ "&inputtype=textquery&fields=name,formatted_address,geometry,permanently_closed&key="
+ API_Key
)
response = requests.get(url).json()
placesAPI_data = pd.concat(
[placesAPI_data, pd.DataFrame(response["candidates"])],
ignore_index=True,
sort=False,
) # append retrieved information to a dataframe
google_data = placesAPI_data
lat_list = []
lng_list = []
for i in range(google_data.shape[0]):
lat_list.append(google_data["geometry"][i]["location"]["lat"])
lng_list.append(google_data["geometry"][i]["location"]["lng"])
google_data["lat"] = lat_list
google_data["lng"] = lng_list
return google_data
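# Illustrative usage sketch (business name and API key are hypothetical):
# get_address_google(["Some Funeral Home"], API_Key="...") returns one row per
# Places "findplacefromtext" match, with lat/lng columns extracted from geometry.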
def format_coordinate(dataframe, drop=False):
df = dataframe
if drop:
df = df.dropna(subset=["Geom"])
df[["drop_this", "location"]] = df.Geom.str.split("[", expand=True)
df[["lng", "lat"]] = df.location.str.split(",", expand=True)
df["lat"] = df.lat.str.replace("]}", "")
df = df.drop(columns=["drop_this", "location"])
return df
def get_distance_by_coordinate(formatted_business, name, API_Key):
google_data = get_address_google(name, API_Key)
    google_name = google_data["name"][0] if google_data.shape[0] > 0 else ""
filter_name = formatted_business[formatted_business["BusinessName"] == name]
if filter_name.shape[0] == 0:
        warn = (
            "No geometric information provided for "
            + name
            + " in Business Licences data."
        )
        return warn, 5000, google_name
else:
lat = float(filter_name[["lat"]].iloc[0])
lng = float(filter_name[["lng"]].iloc[0])
if google_data.shape[0] == 0:
warn = "Could not find information about " + filter_name + " on Google maps."
return warn, 5000
else:
google_lat = google_data["lat"][0]
google_lng = google_data["lng"][0]
warn = "Giving distance between geometric information obtained."
dlon = radians(lng) - radians(google_lng)
dlat = radians(lat) - radians(google_lat)
a = (sin(dlat / 2)) ** 2 + cos(radians(lat)) * cos(radians(google_lat)) * (
sin(dlon / 2)
) ** 2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
R = 6373.0
distance = R * c
return warn, distance, google_name
def get_comparison_dataframe(funerals, API_Key):
name_list = list(funerals["BusinessName"])
distance_list = []
warn_list = []
google_name_list = []
for i in range(len(name_list)):
warn, distance, google_name = get_distance_by_coordinate(
funerals, name=name_list[i], API_Key = API_Key
)
distance_list.append(distance)
warn_list.append(warn)
google_name_list.append(google_name)
distance_data = pd.DataFrame(
{
"Name": name_list,
"Google Name": google_name_list,
"Distance(km)": distance_list,
"Warning": warn_list,
}
)
return distance_data
def google_map_figure(output_data):
chart = alt.Chart(output_data, title = "Comparing Distance Between locations of Businesses (Licence vs GoogleMaps)").mark_circle(size = 50).encode(
x = alt.X('Distance(km)'),
y = alt.Y('Name',axis=alt.Axis(title=" ")),
color = alt.Color('Google Name'),
tooltip = 'Google Name'
)
return chart
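# Illustrative usage sketch (output filename is hypothetical):
# google_map_figure(distance_data).save("distance_comparison.html")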
def fancy_table(data, col_width=3.0, row_height=0.625, row_colors=['#f1f1f2', 'w'],
header_columns=0, ax=None, **kwargs):
"""[Modified from ref: https://stackoverflow.com/questions/19726663/how-to-save-the-pandas-dataframe-series-data-as-a-figure]
[Prints given dataframe in a nice format, that is easy to save]
Parameters
----------
data : [data frame]
[data frame]
col_width : float, optional
[column width], by default 3.0
row_height : float, optional
[row height], by default 0.625
row_colors : list, optional
[row color], by default ['#f1f1f2', 'w']
header_columns : int, optional
[header columns], by default 0
    ax : [matplotlib axes], optional
        [axes to draw the table on], by default None
Returns
-------
[object]
[figure]
"""
if ax is None:
size = (np.array(data.shape[::-1]) + np.array([0, 1])) * np.array([col_width, row_height])
fig, ax = plt.subplots(figsize=size)
ax.axis('off')
mpl_table = ax.table(cellText=data.values, bbox=[0, 0, 1, 1], colLabels=data.columns, **kwargs)
mpl_table.auto_set_font_size(False)
mpl_table.set_fontsize(14)
for k, cell in mpl_table._cells.items():
cell.set_edgecolor('w')
if k[0] == 0 or k[1] < header_columns:
cell.set_text_props(weight='bold', color='w')
cell.set_facecolor('firebrick')
else:
cell.set_facecolor(row_colors[k[0]%len(row_colors) ])
return ax.get_figure(), ax
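# Illustrative usage sketch (filename is hypothetical):
# fig, _ = fancy_table(generate_dataset_overview(df))
# fig.savefig("dataset_overview.png")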
def generate_dataset_overview(data_frame):
"""
Generates an overview of the dataset.
    Summarizes the number of features, number of rows, and the count and
    percentage of missing cells.
    Parameters:
    -----------
    data_frame : pandas.DataFrame
        data frame to summarize
    Returns:
    -----------
    pandas.DataFrame
        overview table with one row per statistic
"""
data_overview = [
{"Dataset": "Number of features", "Value": len(data_frame.columns)},
{"Dataset": "Number of characters", "Value": len(data_frame)},
{"Dataset": "Number of Missing cells", "Value": (data_frame.isnull()).sum().sum()},
{"Dataset": "Percentage of Missing cells", "Value": round((data_frame.isnull()).sum().sum()/data_frame.size*100, 2)}
]
overview_frame = pd.DataFrame(data_overview)
return overview_frame
def generate_feature_overview(data_frame):
"""
    Generates an overview of the features in the dataset.
    For each feature, reports the number of distinct classes, the non-null
    count, and the percentage of missing values.
    Parameters:
    -----------
    data_frame : pandas.DataFrame
        data frame to summarize
    Returns:
    -----------
    pandas.DataFrame
        per-feature overview table
"""
distinct_class = dict()
nonnull_count = dict()
for col in data_frame.columns:
nonnull_count[col]=len(data_frame)-data_frame[col].isnull().sum()
distinct_class[col]=len(list(data_frame[col].unique()))
features_frame=pd.DataFrame([distinct_class, nonnull_count]).T.reset_index()
features_frame.columns=["Features","Distinct Class", "Non-Null Count"]
features_frame["Missing Percentage"]=round((len(data_frame) - features_frame["Non-Null Count"])/len(data_frame)*100,2)
return features_frame
|
py | 1a3bdef56fb3e0b9f0db6542a9cd906e7ed6c221 | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from compas.geometry import Curve
from compas_rhino.conversions import point_to_rhino
from compas_rhino.conversions import point_to_compas
from compas_rhino.conversions import vector_to_compas
from compas_rhino.conversions import xform_to_rhino
from compas_rhino.conversions import plane_to_compas_frame
from compas_rhino.conversions import box_to_compas
class RhinoCurve(Curve):
"""Class representing a general curve object.
Parameters
----------
name : str, optional
Name of the curve.
Attributes
----------
dimension : int, read-only
The spatial dimension of the curve.
domain : tuple[float, float], read-only
The parameter domain.
start : :class:`~compas.geometry.Point`, read-only
The point corresponding to the start of the parameter domain.
end : :class:`~compas.geometry.Point`, read-only
The point corresponding to the end of the parameter domain.
is_closed : bool, read-only
True if the curve is closed.
is_periodic : bool, read-only
True if the curve is periodic.
Other Attributes
----------------
rhino_curve : :rhino:`Curve`
The underlying Rhino curve.
"""
def __init__(self, name=None):
super(RhinoCurve, self).__init__(name=name)
self._rhino_curve = None
def __eq__(self, other):
return self.rhino_curve.IsEqual(other.rhino_curve)
# ==============================================================================
# Data
# ==============================================================================
# ==============================================================================
# Properties
# ==============================================================================
@property
def rhino_curve(self):
return self._rhino_curve
@rhino_curve.setter
def rhino_curve(self, curve):
self._rhino_curve = curve
@property
def dimension(self):
if self.rhino_curve:
return self.rhino_curve.Dimension
@property
def domain(self):
if self.rhino_curve:
return self.rhino_curve.Domain.T0, self.rhino_curve.Domain.T1
@property
def start(self):
if self.rhino_curve:
return point_to_compas(self.rhino_curve.PointAtStart)
@property
def end(self):
if self.rhino_curve:
return point_to_compas(self.rhino_curve.PointAtEnd)
@property
def is_closed(self):
if self.rhino_curve:
return self.rhino_curve.IsClosed
@property
def is_periodic(self):
if self.rhino_curve:
return self.rhino_curve.IsPeriodic
# ==============================================================================
# Constructors
# ==============================================================================
@classmethod
def from_rhino(cls, rhino_curve):
"""Construct a curve from an existing Rhino curve.
Parameters
----------
rhino_curve : Rhino.Geometry.Curve
Returns
-------
:class:`~compas_rhino.geometry.RhinoCurve`
"""
curve = cls()
curve.rhino_curve = rhino_curve
return curve
# ==============================================================================
# Conversions
# ==============================================================================
# ==============================================================================
# Methods
# ==============================================================================
def copy(self):
"""Make an independent copy of the current curve.
Returns
-------
:class:`~compas_rhino.geometry.RhinoCurve`
"""
cls = type(self)
curve = cls()
curve.rhino_curve = self.rhino_curve.Duplicate()
return curve
def transform(self, T):
"""Transform this curve.
Parameters
----------
T : :class:`~compas.geometry.Transformation`
A COMPAS transformation.
Returns
-------
None
"""
self.rhino_curve.Transform(xform_to_rhino(T))
def reverse(self):
"""Reverse the parametrisation of the curve.
Returns
-------
None
"""
self.rhino_curve.Reverse()
def point_at(self, t):
"""Compute a point on the curve.
Parameters
----------
t : float
The value of the curve parameter. Must be between 0 and 1.
Returns
-------
:class:`~compas.geometry.Point`
the corresponding point on the curve.
"""
point = self.rhino_curve.PointAt(t)
return point_to_compas(point)
def tangent_at(self, t):
"""Compute the tangent vector at a point on the curve.
Parameters
----------
t : float
The value of the curve parameter. Must be between 0 and 1.
Returns
-------
:class:`~compas.geometry.Vector`
The corresponding tangent vector.
"""
vector = self.rhino_curve.TangentAt(t)
return vector_to_compas(vector)
def curvature_at(self, t):
"""Compute the curvature at a point on the curve.
Parameters
----------
t : float
The value of the curve parameter. Must be between 0 and 1.
Returns
-------
:class:`~compas.geometry.Vector`
The corresponding curvature vector.
"""
vector = self.rhino_curve.CurvatureAt(t)
return vector_to_compas(vector)
def frame_at(self, t):
"""Compute the local frame at a point on the curve.
Parameters
----------
t : float
The value of the curve parameter.
Returns
-------
:class:`~compas.geometry.Frame`
The corresponding local frame.
"""
plane = self.rhino_curve.FrameAt(t)
return plane_to_compas_frame(plane)
def torsion_at(self, t):
"""Compute the torsion of the curve at a parameter.
Parameters
----------
t : float
The value of the curve parameter.
Returns
-------
float
The torsion value.
"""
return self.rhino_curve.TorsionAt(t)
# ==============================================================================
# Methods continued
# ==============================================================================
def closest_point(self, point, return_parameter=False):
"""Compute the closest point on the curve to a given point.
Parameters
----------
point : :class:`~compas.geometry.Point`
The test point.
return_parameter : bool, optional
If True, the parameter corresponding to the closest point should be returned in addition to the point.
Returns
-------
:class:`~compas.geometry.Point` | tuple[:class:`~compas.geometry.Point`, float]
If `return_parameter` is False, only the closest point is returned.
If `return_parameter` is True, the closest point and the corresponding parameter are returned.
"""
result, t = self.rhino_curve.ClosestPoint(point_to_rhino(point))
if not result:
return
point = self.point_at(t)
if return_parameter:
return point, t
return point
def divide_by_count(self, count, return_points=False):
"""Divide the curve into a specific number of equal length segments.
Parameters
----------
count : int
The number of segments.
return_points : bool, optional
If True, return the list of division parameters,
and the points corresponding to those parameters.
If False, return only the list of parameters.
Returns
-------
list[float] | tuple[list[float], list[:class:`~compas.geometry.Point`]]
If `return_points` is False, the parameters of the discretisation.
If `return_points` is True, a list of points in addition to the parameters of the discretisation.
"""
params = self.rhino_curve.DivideByCount(count, True)
if return_points:
points = [self.point_at(t) for t in params]
return params, points
return params
def divide_by_length(self, length, return_points=False):
"""Divide the curve into segments of specified length.
Parameters
----------
length : float
The length of the segments.
return_points : bool, optional
If True, return the list of division parameters,
and the points corresponding to those parameters.
If False, return only the list of parameters.
Returns
-------
list[float] | tuple[list[float], list[:class:`~compas.geometry.Point`]]
If `return_points` is False, the parameters of the discretisation.
If `return_points` is True, a list of points in addition to the parameters of the discretisation.
"""
params = self.rhino_curve.DivideByLength(length, True)
if return_points:
points = [self.point_at(t) for t in params]
return params, points
return params
def aabb(self):
"""Compute the axis aligned bounding box of the curve.
Returns
-------
:class:`~compas.geometry.Box`
"""
        box = self.rhino_curve.GetBoundingBox(True)
return box_to_compas(box)
def length(self, precision=1e-8):
"""Compute the length of the curve.
Parameters
----------
precision : float, optional
Required precision of the calculated length.
"""
return self.rhino_curve.GetLength(precision)
|
py | 1a3bdf221419a3989d129cc742e1c69fa0cdc63d | # Load selenium components
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait, Select
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
import time
def course_desc():
# Establish chrome driver and go to report site URL
url = "https://enr-apps.as.cmu.edu/open/SOC/SOCServlet/search"
driver = webdriver.Chrome()
driver.maximize_window()
driver.get(url)
count = 0
driver.find_element_by_xpath("/html/body/div/div[2]/form/div[3]/div/div/button[1]").click()
tables = driver.find_elements_by_id("search-results-table")
input_path = './data/temp/course_description_1.txt'
f = open(input_path,"a+")
# Crawl course description through full x_path matching
for tab_num in range(2,len(tables)):
courses = tables[tab_num].find_elements_by_tag_name('tr')
for i in range(1,len(courses)):
path = "/html/body/div/div[2]/table["+str(tab_num+1)+"]/tbody/tr["+str(i)+"]/td[1]/a"
try:
handler = driver.find_element_by_xpath(path)
cID = handler.text
driver.execute_script("arguments[0].scrollIntoView();", handler)
handler.click()
# If the row is a subrow of a specific course, skip it
except:
continue
# Wait for the website to response
time.sleep(3)
description = driver.find_element_by_class_name("text-left").text
f.write(cID+":"+description)
f.write('\n')
driver.find_element_by_class_name("close").click()
f.close()
|
py | 1a3bdf93fae5c1a870325a4c2db918dca0a8a4de | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
import unittest
import os
from pymatgen.io.feff.outputs import LDos, Xmu
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
test_dir_reci = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files', 'feff_reci_dos')
class FeffLdosTest(unittest.TestCase):
filepath1 = os.path.join(test_dir, 'feff.inp')
filepath2 = os.path.join(test_dir, 'ldos')
l = LDos.from_file(filepath1, filepath2)
reci_feffinp = os.path.join(test_dir_reci, 'feff.inp')
reci_ldos = os.path.join(test_dir_reci, 'ldos')
reci_dos = LDos.from_file(reci_feffinp, reci_ldos)
def test_init(self):
efermi = FeffLdosTest.l.complete_dos.efermi
self.assertEqual(efermi, -11.430,
"Did not read correct Fermi energy from ldos file")
def test_complete_dos(self):
complete_dos = FeffLdosTest.l.complete_dos
self.assertEqual(complete_dos.as_dict()['spd_dos']["s"]['efermi'],
- 11.430,
"Failed to construct complete_dos dict properly")
def test_as_dict_and_from_dict(self):
l2 = FeffLdosTest.l.charge_transfer_to_string()
d = FeffLdosTest.l.as_dict()
l3 = LDos.from_dict(d).charge_transfer_to_string()
self.assertEqual(l2, l3, "Feffldos to and from dict does not match")
def test_reci_init(self):
efermi = FeffLdosTest.reci_dos.complete_dos.efermi
self.assertEqual(efermi, -9.672,
"Did not read correct Fermi energy from ldos file")
def test_reci_complete_dos(self):
complete_dos = FeffLdosTest.reci_dos.complete_dos
self.assertEqual(complete_dos.as_dict()['spd_dos']["s"]['efermi'],
-9.672,
"Failed to construct complete_dos dict properly")
def test_reci_charge(self):
charge_trans = FeffLdosTest.reci_dos.charge_transfer
self.assertEqual(charge_trans['0']['Na']['s'], 0.241)
self.assertEqual(charge_trans['1']['O']['tot'], -0.594)
class XmuTest(unittest.TestCase):
def test_init(self):
filepath1 = os.path.join(test_dir, 'xmu.dat')
filepath2 = os.path.join(test_dir, 'feff.inp')
x = Xmu.from_file(filepath1, filepath2)
self.assertEqual(x.absorbing_atom, 'O',
"failed to read xmu.dat file properly")
def test_as_dict_and_from_dict(self):
filepath1 = os.path.join(test_dir, 'xmu.dat')
filepath2 = os.path.join(test_dir, 'feff.inp')
x = Xmu.from_file(filepath1, filepath2)
data=x.data.tolist()
d=x.as_dict()
x2 = Xmu.from_dict(d)
data2= x2.data.tolist()
self.assertEqual(data, data2, "Xmu to and from dict does not match")
if __name__ == '__main__':
unittest.main()
|
py | 1a3bdfbf86d84b2e18b3d0505c8e928edb861b7c | _base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/lvis_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
roi_head=dict(
bbox_head=dict(num_classes=1230), mask_head=dict(num_classes=1230)))
test_cfg = dict(
rcnn=dict(
score_thr=0.0001,
# LVIS allows up to 300
max_per_img=300))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(dataset=dict(pipeline=train_pipeline)))
|
py | 1a3be04cccc62209931075fe2947d10eb9adff19 | """Lazy version of the dataset for training TDC and CMC."""
from typing import Any, List, Tuple
import cv2
import librosa
import numpy as np
import torch
from skvideo.io import FFmpegReader
from torch.utils.data import Dataset
class LazyTDCCMCDataset(Dataset):
"""
Dataset for training TDC and CMC.
Dataset for sampling video frames and audio snippets with distance
labels to train the embedding networks.
Parameters
----------
filenames : list of str
List of filenames of video files.
    trims : list of tuple
List of tuples `(begin_idx, end_idx)` that specify what frame
of the video to start and end.
crops : list of tuple
List of tuples `(x_1, y_1, x_2, y_2)` that define the clip
window of each video.
frame_rate : int
Frame rate to sample video. Default to 15.
"""
def __init__(
self,
filenames: List[str],
trims: List[Tuple[int, int]],
crops: List[Tuple[int, int, int, int]],
frame_rate: float = 15,
):
        # TDCCMCDataset is an unconventional dataset: each sample is
        # dynamically generated whenever needed instead of being drawn from a static dataset.
# Therefore, in `__init__`, we do not define a static dataset. Instead,
# we simply preprocess the video and audio for faster `__getitem__`.
super().__init__()
self.filenames = filenames
self.trims = trims
self.crops = crops
self.audios: List[np.ndarray] = []
self.readers: List[Any] = []
for filename in filenames:
# Get video frames with scikit-video
reader = FFmpegReader(
filename + ".mp4",
inputdict={"-r": str(frame_rate)},
outputdict={"-r": str(frame_rate)},
)
self.readers.append(reader)
# STFT audio
# TODO Magic number sr=2000, n_fft=510
y, _ = librosa.load(filename + ".wav", sr=2000)
D = librosa.core.stft(y, n_fft=510)
D = np.abs(D)
# Save audio
self.audios.append(D)
def __len__(self) -> int:
        # Return a high number since this dataset is dynamic. Don't use
        # this explicitly!
return np.iinfo(np.int64).max
def __getitem__(
self, index: int
) -> Tuple[
torch.FloatTensor,
torch.FloatTensor,
torch.FloatTensor,
torch.LongTensor,
torch.LongTensor,
]:
"""
Return a sample from the dynamic dataset.
Each sample contains two video frames, one audio snippet, one
TDC label and one CMC label. In other words, the format is
(frame_v, frame_w, audio_a, tdc_label, cmc_label).
Parameters
----------
index : int
Returns
-------
frame_v : torch.FloatTensor
frame_w : torch.FloatTensor
        audio_a : torch.FloatTensor
tdc_label : torch.LongTensor
cmc_label : torch.LongTensor
"""
# Below is a paragraph from the original paper:
#
# To generate training data, we sample input pairs (v^i, w^i) (where
# v^i and w^i are sampled from the same domain) as follows. First, we
# sample a demonstration sequence from our three training videos. Next,
# we sample both an interval, d_k ∈ {[0], [1], [2], [3 - 4], [5 - 20],
# [21 - 200]}, and a distance, ∆t ∈ dk. Finally, we randomly select a
# pair of frames from the sequence with temporal distance ∆t. The model
# is trained with Adam using a learning rate of 10^-4 and batch size of
# 32 for 200,000 steps.
#
# From Section 5: Implementation Details
# 1) Sample video from videos
src_idx = np.random.choice(len(self.audios))
reader = self.readers[src_idx]
audio = self.audios[src_idx]
trim = self.trims[src_idx]
crop = self.crops[src_idx]
# 2) Sample tdc_label and cmc_label
tdc_label = self._sample_label()
cmc_label = self._sample_label()
# 3) Sample tdc_distance, cmc_distance
tdc_distance = self._sample_distance_from_label(tdc_label)
cmc_distance = self._sample_distance_from_label(cmc_label)
# 4) Sample framestack_v from video (check limits carefully)
framestack_v_idx = np.random.randint(0, reader.getShape()[0] - tdc_distance - 4)
framestack_v = self._sample_framestack(framestack_v_idx, reader, trim, crop)
# 5) Sample frame_w from video
framestack_w_idx = framestack_v_idx + tdc_distance
framestack_w = self._sample_framestack(framestack_w_idx, reader, trim, crop)
# 6) Sample audio_a from audio
audio_a_idx = framestack_v_idx + cmc_distance
audio_a = audio[:, audio_a_idx : audio_a_idx + 137]
audio_a = torch.FloatTensor(audio_a)
# 7) Crop Frames from 140x140 to 128x128
# TODO Is it correct to use same crop for both v and w?
y = np.random.randint(0, 140 - 128)
x = np.random.randint(0, 140 - 128)
framestack_v = framestack_v[:, :, y : y + 128, x : x + 128]
framestack_w = framestack_w[:, :, y : y + 128, x : x + 128]
# 8) Switch 4 x 3 x 128 x 128 to 1 x 12 x 128 x 128
framestack_v = torch.FloatTensor(framestack_v).view(-1, 128, 128)
framestack_w = torch.FloatTensor(framestack_w).view(-1, 128, 128)
# 9) Scale image values from 0~255 to 0~1
framestack_v /= 255.0
framestack_w /= 255.0
# 10) Return (frame_v, frame_w, audio_a, tdc_label, cmc_label)
return (
framestack_v,
framestack_w,
audio_a,
torch.LongTensor([tdc_label]),
torch.LongTensor([cmc_label]),
)
def _sample_label(self) -> int:
"""
Sample randomly from label.
Returns
-------
label : int
Label sampled from 0 ~ 5.
"""
return np.random.choice(6)
def _sample_distance_from_label(self, label: int) -> int:
"""
Sample randomly from distance from label.
Label 0: Distance 0
Label 1: Distance 1
Label 2: Distance 2
Label 3: Distance sampled from [3, 4]
Label 4: Distance sampled from [5, 20]
Label 5: Distance sampled from [21, 200]
Parameters
----------
label : int
Label sampled randomly.
Returns
-------
distance: int
Distance sampled according to the label.
"""
if label == 0: # [0]
distance = 0
elif label == 1: # [1]
distance = 1
elif label == 2: # [2]
distance = 2
elif label == 3: # [3 - 4]
distance = np.random.choice(np.arange(3, 4 + 1))
elif label == 4: # [5 - 20]
distance = np.random.choice(np.arange(5, 20 + 1))
else: # [21 - 200]
distance = np.random.choice(np.arange(21, 200 + 1))
return distance
def _sample_framestack(
self,
start_frame: int,
reader: Any,
trim: Tuple[int, int],
crop: Tuple[int, int, int, int],
) -> np.ndarray:
assert start_frame + trim[0] + 4 < reader.getShape()[0]
framestack = []
for frame_idx, frame in enumerate(reader.nextFrame()):
# Trim video (time)
if start_frame + trim[0] <= frame_idx < start_frame + trim[0] + 4:
# Crop frames (space)
frame = frame[crop[1] : crop[3], crop[0] : crop[2], :]
framestack.append(cv2.resize(frame, (140, 140)))
if frame_idx == start_frame + trim[0] + 4:
break
# Change to NumPy array with PyTorch dimension format
framestack = np.array(framestack, dtype=float)
framestack = np.transpose(framestack, axes=(0, 3, 1, 2))
return framestack
|
py | 1a3be04d5ca2fe56cf1a0abaf4349c7dcf850b70 | import requests
import json
host = "s-platform.api.opendns.com"
api_key = "a0b1c2d3-e4f5-g6h7-i8j9-kalbmcndoepf"
print(f"\n==> Finding all of the domains in a custom enforcement list")
url = f"https://{host}/1.0/domains?customerKey={api_key}"
headers = {'Authorization':'Bearer ' + api_key}
response = requests.get(url, headers=headers)
response.raise_for_status()
print(response.json()) |
py | 1a3be0a3e0d3d8e174d32bfda374a2fc0a784839 | """Tests for device finding functionality."""
import unittest
from unittest.mock import patch
from pysyncdroid.exceptions import DeviceException
from pysyncdroid.find_device import (
get_connection_details,
get_mtp_details,
lsusb,
)
mock_lsub_parts = [
"Bus 002 Device 001: ID 0123:0001 test_vendor test_model1",
"Bus 002 Device 002: ID 0456:0002 test_vendor test_model2",
"Bus 002 Device 003: ID 0789:0003 test_vendor test_model3",
"Bus 002 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hub",
"Bus 004 Device 001: ID 1d6b:0003 Linux Foundation 3.0 root hub",
]
MOCK_LSUB_RESULT = "\n".join(mock_lsub_parts)
class TestLsusb(unittest.TestCase):
def setUp(self):
self.patcher = patch("pysyncdroid.find_device.run_bash_cmd")
self.mock_run_bash_cmd = self.patcher.start()
def tearDown(self):
self.patcher.stop()
def test_lsusb(self):
lsusb()
self.mock_run_bash_cmd.assert_called_with(["lsusb"])
class TestFindDevice(unittest.TestCase):
def setUp(self):
self.patcher = patch("pysyncdroid.find_device.lsusb")
self.mock_lsusb = self.patcher.start()
self.mock_lsusb.return_value = MOCK_LSUB_RESULT
def tearDown(self):
self.patcher.stop()
def test_get_connection_details_device_exception(self):
"""
Test 'get_connection_details' raises a DeviceException with an
appropriate error message when trying to find a non-existent device.
"""
with self.assertRaises(DeviceException) as exc:
get_connection_details(
vendor="non-existent-vendor", model="non-existent-model"
)
exc_msg_parts = (
'Device "non-existent-vendor non-existent-model" not found.',
'No "non-existent-vendor" devices were found.',
)
self.assertEqual(str(exc.exception), "\n".join(exc_msg_parts))
def test_get_connection_details_device_exception_message(self):
"""
Test 'get_connection_details' raises a DeviceException and the provided
error message lists all vendor devices when trying to find a
non-existent model.
"""
with self.assertRaises(DeviceException) as exc:
get_connection_details(vendor="linux", model="non-existent-model")
exc_msg_parts = (
'Device "linux non-existent-model" not found.',
'Following "linux" devices were found:',
"Linux Foundation 2.0 root hub",
"Linux Foundation 3.0 root hub",
)
self.assertEqual(str(exc.exception), "\n".join(exc_msg_parts))
def test_get_connection_details_multiple_devices(self):
"""
Test 'get_connection_details' is able to find the given device in case
of multiple devices from the same vendor (i.e. it doesn't pick up the
first device for a certain vendor).
"""
connection_details = get_connection_details(
vendor="test_vendor", model="test_model3"
)
self.assertIsInstance(connection_details, tuple)
self.assertEqual(connection_details[0], "002")
self.assertEqual(connection_details[1], "003")
def test_get_mtp_details(self):
"""
Test 'get_mtp_details' returns a valid MTP url gvfs path.
"""
usb_bus, device = get_connection_details(vendor="linux", model="root")
mtp_details = get_mtp_details(usb_bus, device)
self.assertIsInstance(mtp_details, tuple)
for mtp_detail in mtp_details:
self.assertIn(device, mtp_detail)
self.assertIn(usb_bus, mtp_detail)
|
py | 1a3be194be3e9385d71f5ba801b198b75f1432b3 | # -*- coding: utf-8 -*-
# Copyright (C) 2013 Rackspace Hosting Inc. All Rights Reserved.
# Copyright (C) 2013 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class JobBoard(object):
"""A jobboard is an abstract representation of a place where jobs
can be posted, reposted, claimed and transferred. There can be multiple
implementations of this job board, depending on the desired semantics and
capabilities of the underlying jobboard implementation.
"""
def __init__(self, name):
self._name = name
@property
def name(self):
"""The non-uniquely identifying name of this jobboard."""
return self._name
@abc.abstractmethod
def consume(self, job):
"""Permanently (and atomically) removes a job from the jobboard,
signaling that this job has been completed by the entity assigned
to that job.
Only the entity that has claimed that job is able to consume a job.
A job that has been consumed can not be reclaimed or reposted by
another entity (job postings are immutable). Any entity consuming
a unclaimed job (or a job they do not own) will cause an exception.
"""
@abc.abstractmethod
def post(self, job):
"""Atomically posts a given job to the jobboard, allowing others to
attempt to claim that job (and subsequently work on that job).
Once a job has been posted it can only be removed by consuming that
job (after that job is claimed). Any entity can post or propose jobs
to the jobboard (in the future this may be restricted).
"""
@abc.abstractmethod
def claim(self, job, who):
"""Atomically attempts to claim the given job for the entity and either
succeeds or fails at claiming by throwing corresponding exceptions.
If a job is claimed it is expected that the entity that claims that job
will at sometime in the future work on that jobs flows and either fail
at completing them (resulting in a reposting) or consume that job from
the jobboard (signaling its completion).
"""
@abc.abstractmethod
def repost(self, job):
"""Atomically reposts the given job on the jobboard, allowing that job
to be reclaimed by others. This would typically occur if the entity
that has claimed the job has failed or is unable to complete the job
or jobs it has claimed.
Only the entity that has claimed that job can repost a job. Any entity
reposting a unclaimed job (or a job they do not own) will cause an
exception.
"""
|
py | 1a3be3e84fac59ebc54a25f76f058656d8c2754b | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles database requests from other nova services."""
import collections
import contextlib
import copy
import eventlet
import functools
import sys
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_utils import timeutils
from oslo_utils import versionutils
import six
from nova.accelerator import cyborg
from nova import availability_zones
from nova.compute import instance_actions
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute.utils import wrap_instance_event
from nova.compute import vm_states
from nova.conductor.tasks import cross_cell_migrate
from nova.conductor.tasks import live_migrate
from nova.conductor.tasks import migrate
from nova import context as nova_context
from nova.db import base
from nova import exception
from nova.i18n import _
from nova.image import glance
from nova import manager
from nova.network import neutron
from nova import notifications
from nova import objects
from nova.objects import base as nova_object
from nova.objects import fields
from nova import profiler
from nova import rpc
from nova.scheduler.client import query
from nova.scheduler.client import report
from nova.scheduler import utils as scheduler_utils
from nova import servicegroup
from nova import utils
from nova.volume import cinder
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
def targets_cell(fn):
"""Wrap a method and automatically target the instance's cell.
This decorates a method with signature func(self, context, instance, ...)
and automatically targets the context with the instance's cell
mapping. It does this by looking up the InstanceMapping.
"""
@functools.wraps(fn)
def wrapper(self, context, *args, **kwargs):
instance = kwargs.get('instance') or args[0]
try:
im = objects.InstanceMapping.get_by_instance_uuid(
context, instance.uuid)
except exception.InstanceMappingNotFound:
LOG.error('InstanceMapping not found, unable to target cell',
instance=instance)
except db_exc.CantStartEngineError:
# Check to see if we can ignore API DB connection failures
# because we might already be in the cell conductor.
with excutils.save_and_reraise_exception() as err_ctxt:
if CONF.api_database.connection is None:
err_ctxt.reraise = False
else:
LOG.debug('Targeting cell %(cell)s for conductor method %(meth)s',
{'cell': im.cell_mapping.identity,
'meth': fn.__name__})
# NOTE(danms): Target our context to the cell for the rest of
# this request, so that none of the subsequent code needs to
# care about it.
nova_context.set_target_cell(context, im.cell_mapping)
return fn(self, context, *args, **kwargs)
return wrapper
class ConductorManager(manager.Manager):
"""Mission: Conduct things.
The methods in the base API for nova-conductor are various proxy operations
performed on behalf of the nova-compute service running on compute nodes.
Compute nodes are not allowed to directly access the database, so this set
of methods allows them to get specific work done without locally accessing
the database.
The nova-conductor service also exposes an API in the 'compute_task'
namespace. See the ComputeTaskManager class for details.
"""
target = messaging.Target(version='3.0')
def __init__(self, *args, **kwargs):
super(ConductorManager, self).__init__(service_name='conductor',
*args, **kwargs)
self.compute_task_mgr = ComputeTaskManager()
self.additional_endpoints.append(self.compute_task_mgr)
# NOTE(hanlind): This can be removed in version 4.0 of the RPC API
def provider_fw_rule_get_all(self, context):
# NOTE(hanlind): Simulate an empty db result for compat reasons.
return []
def _object_dispatch(self, target, method, args, kwargs):
"""Dispatch a call to an object method.
This ensures that object methods get called and any exception
that is raised gets wrapped in an ExpectedException for forwarding
back to the caller (without spamming the conductor logs).
"""
try:
# NOTE(danms): Keep the getattr inside the try block since
# a missing method is really a client problem
return getattr(target, method)(*args, **kwargs)
except Exception:
raise messaging.ExpectedException()
def object_class_action_versions(self, context, objname, objmethod,
object_versions, args, kwargs):
objclass = nova_object.NovaObject.obj_class_from_name(
objname, object_versions[objname])
args = tuple([context] + list(args))
result = self._object_dispatch(objclass, objmethod, args, kwargs)
# NOTE(danms): The RPC layer will convert to primitives for us,
# but in this case, we need to honor the version the client is
# asking for, so we do it before returning here.
# NOTE(hanlind): Do not convert older than requested objects,
# see bug #1596119.
if isinstance(result, nova_object.NovaObject):
target_version = object_versions[objname]
requested_version = versionutils.convert_version_to_tuple(
target_version)
actual_version = versionutils.convert_version_to_tuple(
result.VERSION)
do_backport = requested_version < actual_version
other_major_version = requested_version[0] != actual_version[0]
if do_backport or other_major_version:
result = result.obj_to_primitive(
target_version=target_version,
version_manifest=object_versions)
return result
def object_action(self, context, objinst, objmethod, args, kwargs):
"""Perform an action on an object."""
oldobj = objinst.obj_clone()
result = self._object_dispatch(objinst, objmethod, args, kwargs)
updates = dict()
# NOTE(danms): Diff the object with the one passed to us and
# generate a list of changes to forward back
for name, field in objinst.fields.items():
if not objinst.obj_attr_is_set(name):
# Avoid demand-loading anything
continue
if (not oldobj.obj_attr_is_set(name) or
getattr(oldobj, name) != getattr(objinst, name)):
updates[name] = field.to_primitive(objinst, name,
getattr(objinst, name))
# This is safe since a field named this would conflict with the
# method anyway
updates['obj_what_changed'] = objinst.obj_what_changed()
return updates, result
def object_backport_versions(self, context, objinst, object_versions):
target = object_versions[objinst.obj_name()]
LOG.debug('Backporting %(obj)s to %(ver)s with versions %(manifest)s',
{'obj': objinst.obj_name(),
'ver': target,
'manifest': ','.join(
['%s=%s' % (name, ver)
for name, ver in object_versions.items()])})
return objinst.obj_to_primitive(target_version=target,
version_manifest=object_versions)
def reset(self):
objects.Service.clear_min_version_cache()
@contextlib.contextmanager
def try_target_cell(context, cell):
"""If cell is not None call func with context.target_cell.
This is a method to help during the transition period. Currently
various mappings may not exist if a deployment has not migrated to
cellsv2. If there is no mapping call the func as normal, otherwise
call it in a target_cell context.
"""
if cell:
with nova_context.target_cell(context, cell) as cell_context:
yield cell_context
else:
yield context
@contextlib.contextmanager
def obj_target_cell(obj, cell):
"""Run with object's context set to a specific cell"""
with try_target_cell(obj._context, cell) as target:
with obj.obj_alternate_context(target):
yield target
@profiler.trace_cls("rpc")
class ComputeTaskManager(base.Base):
"""Namespace for compute methods.
This class presents an rpc API for nova-conductor under the 'compute_task'
namespace. The methods here are compute operations that are invoked
by the API service. These methods see the operation to completion, which
may involve coordinating activities on multiple compute nodes.
"""
target = messaging.Target(namespace='compute_task', version='1.23')
def __init__(self):
super(ComputeTaskManager, self).__init__()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.volume_api = cinder.API()
self.image_api = glance.API()
self.network_api = neutron.API()
self.servicegroup_api = servicegroup.API()
self.query_client = query.SchedulerQueryClient()
self.report_client = report.SchedulerReportClient()
self.notifier = rpc.get_notifier('compute', CONF.host)
# Help us to record host in EventReporter
self.host = CONF.host
def reset(self):
LOG.info('Reloading compute RPC API')
compute_rpcapi.LAST_VERSION = None
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
# TODO(tdurakov): remove `live` parameter here on compute task api RPC
# version bump to 2.x
# TODO(danms): remove the `reservations` parameter here on compute task api
# RPC version bump to 2.x
@messaging.expected_exceptions(
exception.NoValidHost,
exception.ComputeServiceUnavailable,
exception.ComputeHostNotFound,
exception.InvalidHypervisorType,
exception.InvalidCPUInfo,
exception.UnableToMigrateToSelf,
exception.DestinationHypervisorTooOld,
exception.InvalidLocalStorage,
exception.InvalidSharedStorage,
exception.HypervisorUnavailable,
exception.InstanceInvalidState,
exception.MigrationPreCheckError,
exception.UnsupportedPolicyException)
@targets_cell
@wrap_instance_event(prefix='conductor')
def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
flavor, block_migration, disk_over_commit, reservations=None,
clean_shutdown=True, request_spec=None, host_list=None):
if instance and not isinstance(instance, nova_object.NovaObject):
# NOTE(danms): Until v2 of the RPC API, we need to tolerate
# old-world instance objects here
attrs = ['metadata', 'system_metadata', 'info_cache',
'security_groups']
instance = objects.Instance._from_db_object(
context, objects.Instance(), instance,
expected_attrs=attrs)
# NOTE: Remove this when we drop support for v1 of the RPC API
if flavor and not isinstance(flavor, objects.Flavor):
# Code downstream may expect extra_specs to be populated since it
# is receiving an object, so lookup the flavor to ensure this.
flavor = objects.Flavor.get_by_id(context, flavor['id'])
if live and not rebuild and not flavor:
self._live_migrate(context, instance, scheduler_hint,
block_migration, disk_over_commit, request_spec)
elif not live and not rebuild and flavor:
instance_uuid = instance.uuid
with compute_utils.EventReporter(context, 'cold_migrate',
self.host, instance_uuid):
self._cold_migrate(context, instance, flavor,
scheduler_hint['filter_properties'],
clean_shutdown, request_spec,
host_list)
else:
raise NotImplementedError()
@staticmethod
def _get_request_spec_for_cold_migrate(context, instance, flavor,
filter_properties, request_spec):
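        # Three cases are handled below: no request spec at all (build a
        # fresh one from the instance), a legacy dict (convert it to a
        # RequestSpec object), or an already-hydrated RequestSpec (just
        # update its flavor).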
# NOTE(sbauza): If a reschedule occurs when prep_resize(), then
# it only provides filter_properties legacy dict back to the
# conductor with no RequestSpec part of the payload for <Stein
# computes.
# TODO(mriedem): We can remove this compat code for no request spec
# coming to conductor in ComputeTaskAPI RPC API version 2.0
if not request_spec:
image_meta = utils.get_image_from_system_metadata(
instance.system_metadata)
# Make sure we hydrate a new RequestSpec object with the new flavor
# and not the nested one from the instance
request_spec = objects.RequestSpec.from_components(
context, instance.uuid, image_meta,
flavor, instance.numa_topology, instance.pci_requests,
filter_properties, None, instance.availability_zone,
project_id=instance.project_id, user_id=instance.user_id)
elif not isinstance(request_spec, objects.RequestSpec):
# Prior to compute RPC API 5.1 conductor would pass a legacy dict
# version of the request spec to compute and Stein compute
# could be sending that back to conductor on reschedule, so if we
# got a dict convert it to an object.
# TODO(mriedem): We can drop this compat code when we only support
# compute RPC API >=6.0.
request_spec = objects.RequestSpec.from_primitives(
context, request_spec, filter_properties)
# We don't have to set the new flavor on the request spec because
# if we got here it was due to a reschedule from the compute and
# the request spec would already have the new flavor in it from the
# else block below.
else:
            # NOTE(sbauza): A resize means a new flavor, so we need to update
            # the original RequestSpec object to make sure the scheduler
            # verifies the right one and not the original flavor
request_spec.flavor = flavor
return request_spec
def _cold_migrate(self, context, instance, flavor, filter_properties,
clean_shutdown, request_spec, host_list):
request_spec = self._get_request_spec_for_cold_migrate(
context, instance, flavor, filter_properties, request_spec)
task = self._build_cold_migrate_task(context, instance, flavor,
request_spec, clean_shutdown, host_list)
try:
task.execute()
except exception.NoValidHost as ex:
vm_state = instance.vm_state
if not vm_state:
vm_state = vm_states.ACTIVE
updates = {'vm_state': vm_state, 'task_state': None}
self._set_vm_state_and_notify(context, instance.uuid,
'migrate_server',
updates, ex, request_spec)
# if the flavor IDs match, it's migrate; otherwise resize
if flavor.id == instance.instance_type_id:
msg = _("No valid host found for cold migrate")
else:
msg = _("No valid host found for resize")
raise exception.NoValidHost(reason=msg)
except exception.UnsupportedPolicyException as ex:
with excutils.save_and_reraise_exception():
vm_state = instance.vm_state
if not vm_state:
vm_state = vm_states.ACTIVE
updates = {'vm_state': vm_state, 'task_state': None}
self._set_vm_state_and_notify(context, instance.uuid,
'migrate_server',
updates, ex, request_spec)
except Exception as ex:
with excutils.save_and_reraise_exception():
# Refresh the instance so we don't overwrite vm_state changes
# set after we executed the task.
try:
instance.refresh()
# Passing vm_state is kind of silly but it's expected in
# set_vm_state_and_notify.
updates = {'vm_state': instance.vm_state,
'task_state': None}
self._set_vm_state_and_notify(context, instance.uuid,
'migrate_server',
updates, ex, request_spec)
except exception.InstanceNotFound:
# We can't send the notification because the instance is
# gone so just log it.
LOG.info('During %s the instance was deleted.',
'resize' if instance.instance_type_id != flavor.id
else 'cold migrate', instance=instance)
# NOTE(sbauza): Make sure we persist the new flavor in case we had
# a successful scheduler call if and only if nothing bad happened
if request_spec.obj_what_changed():
request_spec.save()
def _set_vm_state_and_notify(self, context, instance_uuid, method, updates,
ex, request_spec):
scheduler_utils.set_vm_state_and_notify(
context, instance_uuid, 'compute_task', method, updates,
ex, request_spec)
def _cleanup_allocated_networks(
self, context, instance, requested_networks):
try:
# If we were told not to allocate networks let's save ourselves
# the trouble of calling the network API.
if not (requested_networks and requested_networks.no_allocate):
self.network_api.deallocate_for_instance(
context, instance, requested_networks=requested_networks)
except Exception:
LOG.exception('Failed to deallocate networks', instance=instance)
return
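        # Instance system_metadata values are persisted as strings, so the
        # flag below is the string 'False' rather than a boolean.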
instance.system_metadata['network_allocated'] = 'False'
try:
instance.save()
except exception.InstanceNotFound:
# NOTE: It's possible that we're cleaning up the networks
# because the instance was deleted. If that's the case then this
# exception will be raised by instance.save()
pass
@targets_cell
@wrap_instance_event(prefix='conductor')
def live_migrate_instance(self, context, instance, scheduler_hint,
block_migration, disk_over_commit, request_spec):
self._live_migrate(context, instance, scheduler_hint,
block_migration, disk_over_commit, request_spec)
def _live_migrate(self, context, instance, scheduler_hint,
block_migration, disk_over_commit, request_spec):
destination = scheduler_hint.get("host")
def _set_vm_state(context, instance, ex, vm_state=None,
task_state=None):
request_spec = {'instance_properties': {
'uuid': instance.uuid, },
}
scheduler_utils.set_vm_state_and_notify(context,
instance.uuid,
'compute_task', 'migrate_server',
dict(vm_state=vm_state,
task_state=task_state,
expected_task_state=task_states.MIGRATING,),
ex, request_spec)
migration = objects.Migration(context=context.elevated())
migration.dest_compute = destination
migration.status = 'accepted'
migration.instance_uuid = instance.uuid
migration.source_compute = instance.host
migration.migration_type = fields.MigrationType.LIVE_MIGRATION
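        # A live migration does not change the flavor, so the old and new
        # instance type ids recorded on the migration are intentionally
        # the same.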
if instance.obj_attr_is_set('flavor'):
migration.old_instance_type_id = instance.flavor.id
migration.new_instance_type_id = instance.flavor.id
else:
migration.old_instance_type_id = instance.instance_type_id
migration.new_instance_type_id = instance.instance_type_id
migration.create()
task = self._build_live_migrate_task(context, instance, destination,
block_migration, disk_over_commit,
migration, request_spec)
try:
task.execute()
except (exception.NoValidHost,
exception.ComputeHostNotFound,
exception.ComputeServiceUnavailable,
exception.InvalidHypervisorType,
exception.InvalidCPUInfo,
exception.UnableToMigrateToSelf,
exception.DestinationHypervisorTooOld,
exception.InvalidLocalStorage,
exception.InvalidSharedStorage,
exception.HypervisorUnavailable,
exception.InstanceInvalidState,
exception.MigrationPreCheckError,
exception.MigrationSchedulerRPCError) as ex:
with excutils.save_and_reraise_exception():
_set_vm_state(context, instance, ex, instance.vm_state)
migration.status = 'error'
migration.save()
except Exception as ex:
LOG.error('Migration of instance %(instance_id)s to host'
' %(dest)s unexpectedly failed.',
{'instance_id': instance.uuid, 'dest': destination},
exc_info=True)
            # Reset the task state to None to indicate completion of
            # the operation, as is done in the case of known exceptions.
_set_vm_state(context, instance, ex, vm_states.ERROR,
task_state=None)
migration.status = 'error'
migration.save()
raise exception.MigrationError(reason=six.text_type(ex))
def _build_live_migrate_task(self, context, instance, destination,
block_migration, disk_over_commit, migration,
request_spec=None):
return live_migrate.LiveMigrationTask(context, instance,
destination, block_migration,
disk_over_commit, migration,
self.compute_rpcapi,
self.servicegroup_api,
self.query_client,
self.report_client,
request_spec)
def _build_cold_migrate_task(self, context, instance, flavor, request_spec,
clean_shutdown, host_list):
return migrate.MigrationTask(context, instance, flavor,
request_spec, clean_shutdown,
self.compute_rpcapi,
self.query_client, self.report_client,
host_list, self.network_api)
def _destroy_build_request(self, context, instance):
# The BuildRequest needs to be stored until the instance is mapped to
# an instance table. At that point it will never be used again and
# should be deleted.
build_request = objects.BuildRequest.get_by_instance_uuid(
context, instance.uuid)
# TODO(alaski): Sync API updates of the build_request to the
# instance before it is destroyed. Right now only locked_by can
# be updated before this is destroyed.
build_request.destroy()
def _populate_instance_mapping(self, context, instance, host):
try:
inst_mapping = objects.InstanceMapping.get_by_instance_uuid(
context, instance.uuid)
except exception.InstanceMappingNotFound:
# NOTE(alaski): If nova-api is up to date this exception should
# never be hit. But during an upgrade it's possible that an old
# nova-api didn't create an instance_mapping during this boot
# request.
LOG.debug('Instance was not mapped to a cell, likely due '
'to an older nova-api service running.',
instance=instance)
return None
else:
try:
host_mapping = objects.HostMapping.get_by_host(context,
host.service_host)
except exception.HostMappingNotFound:
# NOTE(alaski): For now this exception means that a
# deployment has not migrated to cellsv2 and we should
# remove the instance_mapping that has been created.
# Eventually this will indicate a failure to properly map a
# host to a cell and we may want to reschedule.
inst_mapping.destroy()
return None
else:
inst_mapping.cell_mapping = host_mapping.cell_mapping
inst_mapping.save()
return inst_mapping
def _validate_existing_attachment_ids(self, context, instance, bdms):
"""Ensure any attachment ids referenced by the bdms exist.
New attachments will only be created if the attachment ids referenced
by the bdms no longer exist. This can happen when an instance is
rescheduled after a failure to spawn as cleanup code on the previous
host will delete attachments before rescheduling.
"""
for bdm in bdms:
if bdm.is_volume and bdm.attachment_id:
try:
self.volume_api.attachment_get(context, bdm.attachment_id)
except exception.VolumeAttachmentNotFound:
attachment = self.volume_api.attachment_create(
context, bdm.volume_id, instance.uuid)
bdm.attachment_id = attachment['id']
bdm.save()
def _cleanup_when_reschedule_fails(
self, context, instance, exception, legacy_request_spec,
requested_networks):
"""Set the instance state and clean up.
        It is only used in case build_instances fails while rescheduling the
        instance.
"""
updates = {'vm_state': vm_states.ERROR,
'task_state': None}
self._set_vm_state_and_notify(
context, instance.uuid, 'build_instances', updates, exception,
legacy_request_spec)
self._cleanup_allocated_networks(
context, instance, requested_networks)
compute_utils.delete_arqs_if_needed(context, instance)
# NOTE(danms): This is never cell-targeted because it is only used for
# n-cpu reschedules which go to the cell conductor and thus are always
# cell-specific.
def build_instances(self, context, instances, image, filter_properties,
admin_password, injected_files, requested_networks,
security_groups, block_device_mapping=None, legacy_bdm=True,
request_spec=None, host_lists=None):
# TODO(ndipanov): Remove block_device_mapping and legacy_bdm in version
# 2.0 of the RPC API.
# TODO(danms): Remove this in version 2.0 of the RPC API
if (requested_networks and
not isinstance(requested_networks,
objects.NetworkRequestList)):
requested_networks = objects.NetworkRequestList.from_tuples(
requested_networks)
# TODO(melwitt): Remove this in version 2.0 of the RPC API
flavor = filter_properties.get('instance_type')
if flavor and not isinstance(flavor, objects.Flavor):
            # Code downstream may expect extra_specs to be populated since it
            # is receiving an object, so look up the flavor to ensure this.
flavor = objects.Flavor.get_by_id(context, flavor['id'])
filter_properties = dict(filter_properties, instance_type=flavor)
# Older computes will not send a request_spec during reschedules so we
# need to check and build our own if one is not provided.
if request_spec is None:
legacy_request_spec = scheduler_utils.build_request_spec(
image, instances)
else:
# TODO(mriedem): This is annoying but to populate the local
# request spec below using the filter_properties, we have to pass
# in a primitive version of the request spec. Yes it's inefficient
# and we can remove it once the populate_retry and
# populate_filter_properties utility methods are converted to
# work on a RequestSpec object rather than filter_properties.
# NOTE(gibi): we have to keep a reference to the original
# RequestSpec object passed to this function as we lose information
# during the below legacy conversion
legacy_request_spec = request_spec.to_legacy_request_spec_dict()
# 'host_lists' will be None during a reschedule from a pre-Queens
# compute. In all other cases, it will be a list of lists, though the
# lists may be empty if there are no more hosts left in a rescheduling
# situation.
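        # Illustrative shape: for two instances with one alternate each,
        # host_lists looks like
        #   [[Selection(chosen1), Selection(alt1)],
        #    [Selection(chosen2), Selection(alt2)]]
        # where the first Selection in each inner list is the host whose
        # resources were claimed by the scheduler.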
is_reschedule = host_lists is not None
try:
# check retry policy. Rather ugly use of instances[0]...
# but if we've exceeded max retries... then we really only
# have a single instance.
# TODO(sbauza): Provide directly the RequestSpec object
# when populate_retry() accepts it
scheduler_utils.populate_retry(
filter_properties, instances[0].uuid)
instance_uuids = [instance.uuid for instance in instances]
spec_obj = objects.RequestSpec.from_primitives(
context, legacy_request_spec, filter_properties)
LOG.debug("Rescheduling: %s", is_reschedule)
if is_reschedule:
# Make sure that we have a host, as we may have exhausted all
# our alternates
if not host_lists[0]:
# We have an empty list of hosts, so this instance has
# failed to build.
msg = ("Exhausted all hosts available for retrying build "
"failures for instance %(instance_uuid)s." %
{"instance_uuid": instances[0].uuid})
raise exception.MaxRetriesExceeded(reason=msg)
else:
# This is not a reschedule, so we need to call the scheduler to
# get appropriate hosts for the request.
# NOTE(gibi): We only call the scheduler if we are rescheduling
# from a really old compute. In that case we do not support
# externally-defined resource requests, like port QoS. So no
# requested_resources are set on the RequestSpec here.
host_lists = self._schedule_instances(context, spec_obj,
instance_uuids, return_alternates=True)
except Exception as exc:
# NOTE(mriedem): If we're rescheduling from a failed build on a
# compute, "retry" will be set and num_attempts will be >1 because
# populate_retry above will increment it. If the server build was
# forced onto a host/node or [scheduler]/max_attempts=1, "retry"
# won't be in filter_properties and we won't get here because
# nova-compute will just abort the build since reschedules are
# disabled in those cases.
num_attempts = filter_properties.get(
'retry', {}).get('num_attempts', 1)
for instance in instances:
# If num_attempts > 1, we're in a reschedule and probably
# either hit NoValidHost or MaxRetriesExceeded. Either way,
# the build request should already be gone and we probably
# can't reach the API DB from the cell conductor.
if num_attempts <= 1:
try:
# If the BuildRequest stays around then instance
# show/lists will pull from it rather than the errored
# instance.
self._destroy_build_request(context, instance)
except exception.BuildRequestNotFound:
pass
self._cleanup_when_reschedule_fails(
context, instance, exc, legacy_request_spec,
requested_networks)
return
elevated = context.elevated()
for (instance, host_list) in six.moves.zip(instances, host_lists):
host = host_list.pop(0)
if is_reschedule:
# If this runs in the superconductor, the first instance will
# already have its resources claimed in placement. If this is a
# retry, though, this is running in the cell conductor, and we
# need to claim first to ensure that the alternate host still
# has its resources available. Note that there are schedulers
                # that don't support Placement, so we must assume that the
                # host is still available.
host_available = False
while host and not host_available:
if host.allocation_request:
alloc_req = jsonutils.loads(host.allocation_request)
else:
alloc_req = None
if alloc_req:
try:
host_available = scheduler_utils.claim_resources(
elevated, self.report_client, spec_obj,
instance.uuid, alloc_req,
host.allocation_request_version)
if request_spec and host_available:
# NOTE(gibi): redo the request group - resource
# provider mapping as the above claim call
# moves the allocation of the instance to
# another host
scheduler_utils.fill_provider_mapping(
request_spec, host)
except Exception as exc:
self._cleanup_when_reschedule_fails(
context, instance, exc, legacy_request_spec,
requested_networks)
return
else:
# Some deployments use different schedulers that do not
# use Placement, so they will not have an
# allocation_request to claim with. For those cases,
# there is no concept of claiming, so just assume that
# the host is valid.
host_available = True
if not host_available:
# Insufficient resources remain on that host, so
# discard it and try the next.
host = host_list.pop(0) if host_list else None
if not host_available:
# No more available hosts for retrying the build.
msg = ("Exhausted all hosts available for retrying build "
"failures for instance %(instance_uuid)s." %
{"instance_uuid": instance.uuid})
exc = exception.MaxRetriesExceeded(reason=msg)
self._cleanup_when_reschedule_fails(
context, instance, exc, legacy_request_spec,
requested_networks)
return
# The availability_zone field was added in v1.1 of the Selection
# object so make sure to handle the case where it is missing.
if 'availability_zone' in host:
instance.availability_zone = host.availability_zone
else:
try:
instance.availability_zone = (
availability_zones.get_host_availability_zone(context,
host.service_host))
except Exception as exc:
# Put the instance into ERROR state, set task_state to
# None, inject a fault, etc.
self._cleanup_when_reschedule_fails(
context, instance, exc, legacy_request_spec,
requested_networks)
continue
try:
# NOTE(danms): This saves the az change above, refreshes our
# instance, and tells us if it has been deleted underneath us
instance.save()
except (exception.InstanceNotFound,
exception.InstanceInfoCacheNotFound):
LOG.debug('Instance deleted during build', instance=instance)
continue
local_filter_props = copy.deepcopy(filter_properties)
scheduler_utils.populate_filter_properties(local_filter_props,
host)
# Populate the request_spec with the local_filter_props information
# like retries and limits. Note that at this point the request_spec
# could have come from a compute via reschedule and it would
# already have some things set, like scheduler_hints.
local_reqspec = objects.RequestSpec.from_primitives(
context, legacy_request_spec, local_filter_props)
# NOTE(gibi): at this point the request spec already got converted
# to a legacy dict and then back to an object so we lost the non
# legacy part of the spec. Re-populate the requested_resources
# field based on the original request spec object passed to this
# function.
if request_spec:
local_reqspec.requested_resources = (
request_spec.requested_resources)
# The block_device_mapping passed from the api doesn't contain
# instance specific information
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
# This is populated in scheduler_utils.populate_retry
num_attempts = local_filter_props.get('retry',
{}).get('num_attempts', 1)
if num_attempts <= 1:
# If this is a reschedule the instance is already mapped to
# this cell and the BuildRequest is already deleted so ignore
# the logic below.
inst_mapping = self._populate_instance_mapping(context,
instance,
host)
try:
self._destroy_build_request(context, instance)
except exception.BuildRequestNotFound:
# This indicates an instance delete has been requested in
# the API. Stop the build, cleanup the instance_mapping and
# potentially the block_device_mappings
# TODO(alaski): Handle block_device_mapping cleanup
if inst_mapping:
inst_mapping.destroy()
return
else:
# NOTE(lyarwood): If this is a reschedule then recreate any
# attachments that were previously removed when cleaning up
# after failures to spawn etc.
self._validate_existing_attachment_ids(context, instance, bdms)
alts = [(alt.service_host, alt.nodename) for alt in host_list]
LOG.debug("Selected host: %s; Selected node: %s; Alternates: %s",
host.service_host, host.nodename, alts, instance=instance)
try:
accel_uuids = self._create_and_bind_arq_for_instance(
context, instance, host, local_reqspec)
except Exception as exc:
LOG.exception('Failed to reschedule. Reason: %s', exc)
self._cleanup_when_reschedule_fails(
context, instance, exc, legacy_request_spec,
requested_networks)
continue
self.compute_rpcapi.build_and_run_instance(context,
instance=instance, host=host.service_host, image=image,
request_spec=local_reqspec,
filter_properties=local_filter_props,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=bdms, node=host.nodename,
limits=host.limits, host_list=host_list,
accel_uuids=accel_uuids)
def _create_and_bind_arq_for_instance(self, context, instance, host,
request_spec):
try:
resource_provider_mapping = (
request_spec.get_request_group_mapping())
# Using nodename instead of hostname. See:
# http://lists.openstack.org/pipermail/openstack-discuss/2019-November/011044.html # noqa
return self._create_and_bind_arqs(
context, instance.uuid, instance.flavor.extra_specs,
host.nodename, resource_provider_mapping)
except exception.AcceleratorRequestBindingFailed as exc:
# If anything failed here we need to cleanup and bail out.
cyclient = cyborg.get_client(context)
cyclient.delete_arqs_by_uuid(exc.arqs)
raise
def _schedule_instances(self, context, request_spec,
instance_uuids=None, return_alternates=False):
scheduler_utils.setup_instance_group(context, request_spec)
with timeutils.StopWatch() as timer:
host_lists = self.query_client.select_destinations(
context, request_spec, instance_uuids, return_objects=True,
return_alternates=return_alternates)
LOG.debug('Took %0.2f seconds to select destinations for %s '
'instance(s).', timer.elapsed(), len(instance_uuids))
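        # host_lists has one entry per requested instance; each entry is a
        # list of Selection objects whose first element is the chosen host
        # and whose remaining elements (when return_alternates=True) are
        # alternates for rescheduling.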
return host_lists
@staticmethod
def _restrict_request_spec_to_cell(context, instance, request_spec):
"""Sets RequestSpec.requested_destination.cell for the move operation
Move operations, e.g. evacuate and unshelve, must be restricted to the
cell in which the instance already exists, so this method is used to
target the RequestSpec, which is sent to the scheduler via the
_schedule_instances method, to the instance's current cell.
:param context: nova auth RequestContext
"""
instance_mapping = \
objects.InstanceMapping.get_by_instance_uuid(
context, instance.uuid)
LOG.debug('Requesting cell %(cell)s during scheduling',
{'cell': instance_mapping.cell_mapping.identity},
instance=instance)
if ('requested_destination' in request_spec and
request_spec.requested_destination):
request_spec.requested_destination.cell = (
instance_mapping.cell_mapping)
else:
request_spec.requested_destination = (
objects.Destination(
cell=instance_mapping.cell_mapping))
# TODO(mriedem): Make request_spec required in ComputeTaskAPI RPC v2.0.
@targets_cell
def unshelve_instance(self, context, instance, request_spec=None):
sys_meta = instance.system_metadata
def safe_image_show(ctx, image_id):
if image_id:
return self.image_api.get(ctx, image_id, show_deleted=False)
else:
raise exception.ImageNotFound(image_id='')
if instance.vm_state == vm_states.SHELVED:
instance.task_state = task_states.POWERING_ON
instance.save(expected_task_state=task_states.UNSHELVING)
self.compute_rpcapi.start_instance(context, instance)
elif instance.vm_state == vm_states.SHELVED_OFFLOADED:
image = None
image_id = sys_meta.get('shelved_image_id')
# No need to check for image if image_id is None as
# "shelved_image_id" key is not set for volume backed
# instance during the shelve process
if image_id:
with compute_utils.EventReporter(
context, 'get_image_info', self.host, instance.uuid):
try:
image = safe_image_show(context, image_id)
except exception.ImageNotFound as error:
instance.vm_state = vm_states.ERROR
instance.save()
reason = _('Unshelve attempted but the image %s '
'cannot be found.') % image_id
LOG.error(reason, instance=instance)
compute_utils.add_instance_fault_from_exc(
context, instance, error, sys.exc_info(),
fault_message=reason)
raise exception.UnshelveException(
instance_id=instance.uuid, reason=reason)
try:
with compute_utils.EventReporter(context, 'schedule_instances',
self.host, instance.uuid):
                    # NOTE(sbauza): Force_hosts/nodes need to be reset
                    # if we want to make sure that the next destination
                    # is not forced to be the original host
request_spec.reset_forced_destinations()
# TODO(sbauza): Provide directly the RequestSpec object
# when populate_filter_properties accepts it
filter_properties = request_spec.\
to_legacy_filter_properties_dict()
port_res_req = (
self.network_api.get_requested_resource_for_instance(
context, instance.uuid))
# NOTE(gibi): When cyborg or other module wants to handle
# similar non-nova resources then here we have to collect
# all the external resource requests in a single list and
# add them to the RequestSpec.
request_spec.requested_resources = port_res_req
# NOTE(cfriesen): Ensure that we restrict the scheduler to
# the cell specified by the instance mapping.
self._restrict_request_spec_to_cell(
context, instance, request_spec)
request_spec.ensure_project_and_user_id(instance)
request_spec.ensure_network_metadata(instance)
compute_utils.heal_reqspec_is_bfv(
context, request_spec, instance)
host_lists = self._schedule_instances(context,
request_spec, [instance.uuid],
return_alternates=False)
host_list = host_lists[0]
selection = host_list[0]
scheduler_utils.populate_filter_properties(
filter_properties, selection)
(host, node) = (selection.service_host, selection.nodename)
instance.availability_zone = (
availability_zones.get_host_availability_zone(
context, host))
scheduler_utils.fill_provider_mapping(
request_spec, selection)
self.compute_rpcapi.unshelve_instance(
context, instance, host, request_spec, image=image,
filter_properties=filter_properties, node=node)
except (exception.NoValidHost,
exception.UnsupportedPolicyException):
instance.task_state = None
instance.save()
LOG.warning("No valid host found for unshelve instance",
instance=instance)
return
except Exception:
with excutils.save_and_reraise_exception():
instance.task_state = None
instance.save()
LOG.error("Unshelve attempted but an error "
"has occurred", instance=instance)
else:
LOG.error('Unshelve attempted but vm_state not SHELVED or '
'SHELVED_OFFLOADED', instance=instance)
instance.vm_state = vm_states.ERROR
instance.save()
return
def _allocate_for_evacuate_dest_host(self, context, instance, host,
request_spec=None):
# The user is forcing the destination host and bypassing the
# scheduler. We need to copy the source compute node
# allocations in Placement to the destination compute node.
# Normally select_destinations() in the scheduler would do this
# for us, but when forcing the target host we don't call the
# scheduler.
source_node = None # This is used for error handling below.
try:
source_node = objects.ComputeNode.get_by_host_and_nodename(
context, instance.host, instance.node)
dest_node = (
objects.ComputeNode.get_first_node_by_host_for_old_compat(
context, host, use_slave=True))
except exception.ComputeHostNotFound as ex:
with excutils.save_and_reraise_exception():
self._set_vm_state_and_notify(
context, instance.uuid, 'rebuild_server',
{'vm_state': instance.vm_state,
'task_state': None}, ex, request_spec)
if source_node:
LOG.warning('Specified host %s for evacuate was not '
'found.', host, instance=instance)
else:
LOG.warning('Source host %s and node %s for evacuate was '
'not found.', instance.host, instance.node,
instance=instance)
try:
scheduler_utils.claim_resources_on_destination(
context, self.report_client, instance, source_node, dest_node)
except exception.NoValidHost as ex:
with excutils.save_and_reraise_exception():
self._set_vm_state_and_notify(
context, instance.uuid, 'rebuild_server',
{'vm_state': instance.vm_state,
'task_state': None}, ex, request_spec)
LOG.warning('Specified host %s for evacuate is '
'invalid.', host, instance=instance)
# TODO(mriedem): Make request_spec required in ComputeTaskAPI RPC v2.0.
@targets_cell
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata,
bdms, recreate, on_shared_storage,
preserve_ephemeral=False, host=None,
request_spec=None):
# recreate=True means the instance is being evacuated from a failed
# host to a new destination host. The 'recreate' variable name is
# confusing, so rename it to evacuate here at the top, which is simpler
# than renaming a parameter in an RPC versioned method.
evacuate = recreate
# NOTE(efried): It would be nice if this were two separate events, one
# for 'rebuild' and one for 'evacuate', but this is part of the API
# now, so it would be nontrivial to change.
with compute_utils.EventReporter(context, 'rebuild_server',
self.host, instance.uuid):
node = limits = None
try:
migration = objects.Migration.get_by_instance_and_status(
context, instance.uuid, 'accepted')
except exception.MigrationNotFoundByStatus:
LOG.debug("No migration record for the rebuild/evacuate "
"request.", instance=instance)
migration = None
# The host variable is passed in two cases:
# 1. rebuild - the instance.host is passed to rebuild on the
# same host and bypass the scheduler *unless* a new image
# was specified
# 2. evacuate with specified host and force=True - the specified
# host is passed and is meant to bypass the scheduler.
# NOTE(mriedem): This could be a lot more straight-forward if we
# had separate methods for rebuild and evacuate...
if host:
# We only create a new allocation on the specified host if
# we're doing an evacuate since that is a move operation.
if host != instance.host:
# If a destination host is forced for evacuate, create
# allocations against it in Placement.
try:
self._allocate_for_evacuate_dest_host(
context, instance, host, request_spec)
except exception.AllocationUpdateFailed as ex:
with excutils.save_and_reraise_exception():
if migration:
migration.status = 'error'
migration.save()
# NOTE(efried): It would be nice if this were two
# separate events, one for 'rebuild' and one for
# 'evacuate', but this is part of the API now, so
# it would be nontrivial to change.
self._set_vm_state_and_notify(
context,
instance.uuid,
'rebuild_server',
{'vm_state': vm_states.ERROR,
'task_state': None}, ex, request_spec)
LOG.warning('Rebuild failed: %s',
six.text_type(ex), instance=instance)
except exception.NoValidHost:
with excutils.save_and_reraise_exception():
if migration:
migration.status = 'error'
migration.save()
else:
# At this point, the user is either:
#
# 1. Doing a rebuild on the same host (not evacuate) and
# specified a new image.
# 2. Evacuating and specified a host but are not forcing it.
#
# In either case, the API passes host=None but sets up the
# RequestSpec.requested_destination field for the specified
# host.
if evacuate:
                    # NOTE(sbauza): Augment the RequestSpec object by excluding
                    # the source host to avoid the scheduler picking it again
request_spec.ignore_hosts = [instance.host]
                    # NOTE(sbauza): Force_hosts/nodes need to be reset
                    # if we want to make sure that the next destination
                    # is not forced to be the original host
request_spec.reset_forced_destinations()
port_res_req = (
self.network_api.get_requested_resource_for_instance(
context, instance.uuid))
# NOTE(gibi): When cyborg or other module wants to handle
# similar non-nova resources then here we have to collect
# all the external resource requests in a single list and
# add them to the RequestSpec.
request_spec.requested_resources = port_res_req
try:
# if this is a rebuild of instance on the same host with
# new image.
if not evacuate and orig_image_ref != image_ref:
self._validate_image_traits_for_rebuild(context,
instance,
image_ref)
self._restrict_request_spec_to_cell(
context, instance, request_spec)
request_spec.ensure_project_and_user_id(instance)
request_spec.ensure_network_metadata(instance)
compute_utils.heal_reqspec_is_bfv(
context, request_spec, instance)
host_lists = self._schedule_instances(context,
request_spec, [instance.uuid],
return_alternates=False)
host_list = host_lists[0]
selection = host_list[0]
host, node, limits = (selection.service_host,
selection.nodename, selection.limits)
if recreate:
scheduler_utils.fill_provider_mapping(
request_spec, selection)
except (exception.NoValidHost,
exception.UnsupportedPolicyException,
exception.AllocationUpdateFailed,
# the next two can come from fill_provider_mapping and
# signals a software error.
NotImplementedError,
ValueError) as ex:
if migration:
migration.status = 'error'
migration.save()
# Rollback the image_ref if a new one was provided (this
# only happens in the rebuild case, not evacuate).
if orig_image_ref and orig_image_ref != image_ref:
instance.image_ref = orig_image_ref
instance.save()
with excutils.save_and_reraise_exception():
# NOTE(efried): It would be nice if this were two
# separate events, one for 'rebuild' and one for
# 'evacuate', but this is part of the API now, so it
# would be nontrivial to change.
self._set_vm_state_and_notify(context, instance.uuid,
'rebuild_server',
{'vm_state': vm_states.ERROR,
'task_state': None}, ex, request_spec)
LOG.warning('Rebuild failed: %s',
six.text_type(ex), instance=instance)
compute_utils.notify_about_instance_usage(
self.notifier, context, instance, "rebuild.scheduled")
compute_utils.notify_about_instance_rebuild(
context, instance, host,
action=fields.NotificationAction.REBUILD_SCHEDULED,
source=fields.NotificationSource.CONDUCTOR)
instance.availability_zone = (
availability_zones.get_host_availability_zone(
context, host))
self.compute_rpcapi.rebuild_instance(context,
instance=instance,
new_pass=new_pass,
injected_files=injected_files,
image_ref=image_ref,
orig_image_ref=orig_image_ref,
orig_sys_metadata=orig_sys_metadata,
bdms=bdms,
recreate=evacuate,
on_shared_storage=on_shared_storage,
preserve_ephemeral=preserve_ephemeral,
migration=migration,
host=host, node=node, limits=limits,
request_spec=request_spec)
def _validate_image_traits_for_rebuild(self, context, instance, image_ref):
"""Validates that the traits specified in the image can be satisfied
by the providers of the current allocations for the instance during
rebuild of the instance. If the traits cannot be
satisfied, fails the action by raising a NoValidHost exception.
:raises: NoValidHost exception in case the traits on the providers
of the allocated resources for the instance do not match
the required traits on the image.
"""
image_meta = objects.ImageMeta.from_image_ref(
context, self.image_api, image_ref)
if ('properties' not in image_meta or
'traits_required' not in image_meta.properties or not
image_meta.properties.traits_required):
return
image_traits = set(image_meta.properties.traits_required)
# check any of the image traits are forbidden in flavor traits.
# if so raise an exception
extra_specs = instance.flavor.extra_specs
forbidden_flavor_traits = set()
for key, val in extra_specs.items():
if key.startswith('trait'):
# get the actual key.
prefix, parsed_key = key.split(':', 1)
if val == 'forbidden':
forbidden_flavor_traits.add(parsed_key)
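        # Illustrative example: extra_specs of
        # {'trait:HW_CPU_X86_AVX2': 'forbidden'} would add
        # 'HW_CPU_X86_AVX2' to forbidden_flavor_traits above.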
forbidden_traits = image_traits & forbidden_flavor_traits
if forbidden_traits:
raise exception.NoValidHost(
reason=_("Image traits are part of forbidden "
"traits in flavor associated with the server. "
"Either specify a different image during rebuild "
"or create a new server with the specified image "
"and a compatible flavor."))
# If image traits are present, then validate against allocations.
allocations = self.report_client.get_allocations_for_consumer(
context, instance.uuid)
instance_rp_uuids = list(allocations)
# Get provider tree for the instance. We use the uuid of the host
# on which the instance is rebuilding to get the provider tree.
compute_node = objects.ComputeNode.get_by_host_and_nodename(
context, instance.host, instance.node)
# TODO(karimull): Call with a read-only version, when available.
instance_rp_tree = (
self.report_client.get_provider_tree_and_ensure_root(
context, compute_node.uuid))
traits_in_instance_rps = set()
for rp_uuid in instance_rp_uuids:
traits_in_instance_rps.update(
instance_rp_tree.data(rp_uuid).traits)
missing_traits = image_traits - traits_in_instance_rps
if missing_traits:
raise exception.NoValidHost(
reason=_("Image traits cannot be "
"satisfied by the current resource providers. "
"Either specify a different image during rebuild "
"or create a new server with the specified image."))
# TODO(avolkov): move method to bdm
@staticmethod
def _volume_size(instance_type, bdm):
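        # Behavior sketch: an explicit volume_size is returned as-is; a
        # blank/local BDM with guest_format 'swap' inherits the flavor's
        # 'swap' value, while any other blank/local BDM inherits
        # 'ephemeral_gb'.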
size = bdm.get('volume_size')
# NOTE (ndipanov): inherit flavor size only for swap and ephemeral
if (size is None and bdm.get('source_type') == 'blank' and
bdm.get('destination_type') == 'local'):
if bdm.get('guest_format') == 'swap':
size = instance_type.get('swap', 0)
else:
size = instance_type.get('ephemeral_gb', 0)
return size
def _create_block_device_mapping(self, cell, instance_type, instance_uuid,
block_device_mapping):
"""Create the BlockDeviceMapping objects in the db.
This method makes a copy of the list in order to avoid using the same
id field in case this is called for multiple instances.
"""
LOG.debug("block_device_mapping %s", list(block_device_mapping),
instance_uuid=instance_uuid)
instance_block_device_mapping = copy.deepcopy(block_device_mapping)
for bdm in instance_block_device_mapping:
bdm.volume_size = self._volume_size(instance_type, bdm)
bdm.instance_uuid = instance_uuid
with obj_target_cell(bdm, cell):
bdm.update_or_create()
return instance_block_device_mapping
def _create_tags(self, context, instance_uuid, tags):
"""Create the Tags objects in the db."""
if tags:
tag_list = [tag.tag for tag in tags]
instance_tags = objects.TagList.create(
context, instance_uuid, tag_list)
return instance_tags
else:
return tags
def _create_instance_action_for_cell0(self, context, instance, exc):
"""Create a failed "create" instance action for the instance in cell0.
:param context: nova auth RequestContext targeted at cell0
:param instance: Instance object being buried in cell0
:param exc: Exception that occurred which resulted in burial
"""
# First create the action record.
objects.InstanceAction.action_start(
context, instance.uuid, instance_actions.CREATE, want_result=False)
# Now create an event for that action record.
event_name = 'conductor_schedule_and_build_instances'
objects.InstanceActionEvent.event_start(
context, instance.uuid, event_name, want_result=False,
host=self.host)
# And finish the event with the exception. Note that we expect this
# method to be called from _bury_in_cell0 which is called from within
# an exception handler so sys.exc_info should return values but if not
# it's not the end of the world - this is best effort.
objects.InstanceActionEvent.event_finish_with_failure(
context, instance.uuid, event_name, exc_val=exc,
exc_tb=sys.exc_info()[2], want_result=False)
def _bury_in_cell0(self, context, request_spec, exc,
build_requests=None, instances=None,
block_device_mapping=None,
tags=None):
"""Ensure all provided build_requests and instances end up in cell0.
Cell0 is the fake cell we schedule dead instances to when we can't
schedule them somewhere real. Requests that don't yet have instances
will get a new instance, created in cell0. Instances that have not yet
been created will be created in cell0. All build requests are destroyed
after we're done. Failure to delete a build request will trigger the
instance deletion, just like the happy path in
schedule_and_build_instances() below.
"""
try:
cell0 = objects.CellMapping.get_by_uuid(
context, objects.CellMapping.CELL0_UUID)
except exception.CellMappingNotFound:
# Not yet setup for cellsv2. Instances will need to be written
# to the configured database. This will become a deployment
# error in Ocata.
LOG.error('No cell mapping found for cell0 while '
'trying to record scheduling failure. '
'Setup is incomplete.')
return
build_requests = build_requests or []
instances = instances or []
instances_by_uuid = {inst.uuid: inst for inst in instances}
for build_request in build_requests:
if build_request.instance_uuid not in instances_by_uuid:
# This is an instance object with no matching db entry.
instance = build_request.get_new_instance(context)
instances_by_uuid[instance.uuid] = instance
updates = {'vm_state': vm_states.ERROR, 'task_state': None}
for instance in instances_by_uuid.values():
inst_mapping = None
try:
# We don't need the cell0-targeted context here because the
# instance mapping is in the API DB.
inst_mapping = \
objects.InstanceMapping.get_by_instance_uuid(
context, instance.uuid)
except exception.InstanceMappingNotFound:
# The API created the instance mapping record so it should
# definitely be here. Log an error but continue to create the
# instance in the cell0 database.
LOG.error('While burying instance in cell0, no instance '
'mapping was found.', instance=instance)
# Perform a final sanity check that the instance is not mapped
# to some other cell already because of maybe some crazy
# clustered message queue weirdness.
if inst_mapping and inst_mapping.cell_mapping is not None:
LOG.error('When attempting to bury instance in cell0, the '
'instance is already mapped to cell %s. Ignoring '
'bury in cell0 attempt.',
inst_mapping.cell_mapping.identity,
instance=instance)
continue
with obj_target_cell(instance, cell0) as cctxt:
instance.create()
if inst_mapping:
inst_mapping.cell_mapping = cell0
inst_mapping.save()
# Record an instance action with a failed event.
self._create_instance_action_for_cell0(
cctxt, instance, exc)
# NOTE(mnaser): In order to properly clean-up volumes after
# being buried in cell0, we need to store BDMs.
if block_device_mapping:
self._create_block_device_mapping(
cell0, instance.flavor, instance.uuid,
block_device_mapping)
self._create_tags(cctxt, instance.uuid, tags)
# Use the context targeted to cell0 here since the instance is
# now in cell0.
self._set_vm_state_and_notify(
cctxt, instance.uuid, 'build_instances', updates,
exc, request_spec)
for build_request in build_requests:
try:
build_request.destroy()
except exception.BuildRequestNotFound:
# Instance was deleted before we finished scheduling
inst = instances_by_uuid[build_request.instance_uuid]
with obj_target_cell(inst, cell0):
inst.destroy()
def schedule_and_build_instances(self, context, build_requests,
request_specs, image,
admin_password, injected_files,
requested_networks, block_device_mapping,
tags=None):
# Add all the UUIDs for the instances
instance_uuids = [spec.instance_uuid for spec in request_specs]
try:
host_lists = self._schedule_instances(context, request_specs[0],
instance_uuids, return_alternates=True)
except Exception as exc:
LOG.exception('Failed to schedule instances')
self._bury_in_cell0(context, request_specs[0], exc,
build_requests=build_requests,
block_device_mapping=block_device_mapping,
tags=tags)
return
host_mapping_cache = {}
cell_mapping_cache = {}
instances = []
host_az = {} # host=az cache to optimize multi-create
for (build_request, request_spec, host_list) in six.moves.zip(
build_requests, request_specs, host_lists):
instance = build_request.get_new_instance(context)
# host_list is a list of one or more Selection objects, the first
# of which has been selected and its resources claimed.
host = host_list[0]
# Convert host from the scheduler into a cell record
if host.service_host not in host_mapping_cache:
try:
host_mapping = objects.HostMapping.get_by_host(
context, host.service_host)
host_mapping_cache[host.service_host] = host_mapping
except exception.HostMappingNotFound as exc:
LOG.error('No host-to-cell mapping found for selected '
'host %(host)s. Setup is incomplete.',
{'host': host.service_host})
self._bury_in_cell0(
context, request_spec, exc,
build_requests=[build_request], instances=[instance],
block_device_mapping=block_device_mapping,
tags=tags)
# This is a placeholder in case the quota recheck fails.
instances.append(None)
continue
else:
host_mapping = host_mapping_cache[host.service_host]
cell = host_mapping.cell_mapping
# Before we create the instance, let's make one final check that
# the build request is still around and wasn't deleted by the user
# already.
try:
objects.BuildRequest.get_by_instance_uuid(
context, instance.uuid)
except exception.BuildRequestNotFound:
# the build request is gone so we're done for this instance
LOG.debug('While scheduling instance, the build request '
'was already deleted.', instance=instance)
# This is a placeholder in case the quota recheck fails.
instances.append(None)
# If the build request was deleted and the instance is not
                # going to be created, there is no point in leaving an orphan
# instance mapping so delete it.
try:
im = objects.InstanceMapping.get_by_instance_uuid(
context, instance.uuid)
im.destroy()
except exception.InstanceMappingNotFound:
pass
self.report_client.delete_allocation_for_instance(
context, instance.uuid)
continue
else:
if host.service_host not in host_az:
host_az[host.service_host] = (
availability_zones.get_host_availability_zone(
context, host.service_host))
instance.availability_zone = host_az[host.service_host]
with obj_target_cell(instance, cell):
instance.create()
instances.append(instance)
cell_mapping_cache[instance.uuid] = cell
# NOTE(melwitt): We recheck the quota after creating the
# objects to prevent users from allocating more resources
# than their allowed quota in the event of a race. This is
# configurable because it can be expensive if strict quota
# limits are not required in a deployment.
if CONF.quota.recheck_quota:
try:
compute_utils.check_num_instances_quota(
context, instance.flavor, 0, 0,
orig_num_req=len(build_requests))
except exception.TooManyInstances as exc:
with excutils.save_and_reraise_exception():
self._cleanup_build_artifacts(context, exc, instances,
build_requests,
request_specs,
block_device_mapping, tags,
cell_mapping_cache)
zipped = six.moves.zip(build_requests, request_specs, host_lists,
instances)
for (build_request, request_spec, host_list, instance) in zipped:
if instance is None:
# Skip placeholders that were buried in cell0 or had their
# build requests deleted by the user before instance create.
continue
cell = cell_mapping_cache[instance.uuid]
# host_list is a list of one or more Selection objects, the first
# of which has been selected and its resources claimed.
host = host_list.pop(0)
alts = [(alt.service_host, alt.nodename) for alt in host_list]
LOG.debug("Selected host: %s; Selected node: %s; Alternates: %s",
host.service_host, host.nodename, alts, instance=instance)
filter_props = request_spec.to_legacy_filter_properties_dict()
scheduler_utils.populate_retry(filter_props, instance.uuid)
scheduler_utils.populate_filter_properties(filter_props,
host)
# Now that we have a selected host (which has claimed resource
# allocations in the scheduler) for this instance, we may need to
# map allocations to resource providers in the request spec.
try:
scheduler_utils.fill_provider_mapping(request_spec, host)
except Exception as exc:
# If anything failed here we need to cleanup and bail out.
with excutils.save_and_reraise_exception():
self._cleanup_build_artifacts(
context, exc, instances, build_requests, request_specs,
block_device_mapping, tags, cell_mapping_cache)
# TODO(melwitt): Maybe we should set_target_cell on the contexts
# once we map to a cell, and remove these separate with statements.
with obj_target_cell(instance, cell) as cctxt:
# send a state update notification for the initial create to
# show it going from non-existent to BUILDING
# This can lazy-load attributes on instance.
notifications.send_update_with_states(cctxt, instance, None,
vm_states.BUILDING, None, None, service="conductor")
objects.InstanceAction.action_start(
cctxt, instance.uuid, instance_actions.CREATE,
want_result=False)
instance_bdms = self._create_block_device_mapping(
cell, instance.flavor, instance.uuid, block_device_mapping)
instance_tags = self._create_tags(cctxt, instance.uuid, tags)
# TODO(Kevin Zheng): clean this up once instance.create() handles
# tags; we do this so the instance.create notification in
# build_and_run_instance in nova-compute doesn't lazy-load tags
instance.tags = instance_tags if instance_tags \
else objects.TagList()
# Update mapping for instance.
self._map_instance_to_cell(context, instance, cell)
if not self._delete_build_request(
context, build_request, instance, cell, instance_bdms,
instance_tags):
# The build request was deleted before/during scheduling so
# the instance is gone and we don't have anything to build for
# this one.
continue
try:
accel_uuids = self._create_and_bind_arq_for_instance(
context, instance, host, request_spec)
except Exception as exc:
with excutils.save_and_reraise_exception():
self._cleanup_build_artifacts(
context, exc, instances, build_requests, request_specs,
block_device_mapping, tags, cell_mapping_cache)
# NOTE(danms): Compute RPC expects security group names or ids
# not objects, so convert this to a list of names until we can
# pass the objects.
legacy_secgroups = [s.identifier
for s in request_spec.security_groups]
with obj_target_cell(instance, cell) as cctxt:
self.compute_rpcapi.build_and_run_instance(
cctxt, instance=instance, image=image,
request_spec=request_spec,
filter_properties=filter_props,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=legacy_secgroups,
block_device_mapping=instance_bdms,
host=host.service_host, node=host.nodename,
limits=host.limits, host_list=host_list,
accel_uuids=accel_uuids)
def _create_and_bind_arqs(self, context, instance_uuid, extra_specs,
hostname, resource_provider_mapping):
"""Create ARQs, determine their RPs and initiate ARQ binding.
The binding is asynchronous; Cyborg will notify on completion.
The notification will be handled in the compute manager.
"""
dp_name = extra_specs.get('accel:device_profile')
if not dp_name:
return []
LOG.debug('Calling Cyborg to get ARQs. dp_name=%s instance=%s',
dp_name, instance_uuid)
cyclient = cyborg.get_client(context)
arqs = cyclient.create_arqs_and_match_resource_providers(
dp_name, resource_provider_mapping)
LOG.debug('Got ARQs with resource provider mapping %s', arqs)
bindings = {arq['uuid']:
{"hostname": hostname,
"device_rp_uuid": arq['device_rp_uuid'],
"instance_uuid": instance_uuid
}
for arq in arqs}
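        # Illustrative shape of the binding payload handed to Cyborg:
        #   {<arq uuid>: {'hostname': <compute nodename>,
        #                 'device_rp_uuid': <device RP uuid>,
        #                 'instance_uuid': <instance uuid>}}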
# Initiate Cyborg binding asynchronously
cyclient.bind_arqs(bindings=bindings)
return [arq['uuid'] for arq in arqs]
@staticmethod
def _map_instance_to_cell(context, instance, cell):
"""Update the instance mapping to point at the given cell.
During initial scheduling once a host and cell is selected in which
to build the instance this method is used to update the instance
mapping to point at that cell.
:param context: nova auth RequestContext
:param instance: Instance object being built
:param cell: CellMapping representing the cell in which the instance
was created and is being built.
:returns: InstanceMapping object that was updated.
"""
inst_mapping = objects.InstanceMapping.get_by_instance_uuid(
context, instance.uuid)
# Perform a final sanity check that the instance is not mapped
# to some other cell already because of maybe some crazy
# clustered message queue weirdness.
if inst_mapping.cell_mapping is not None:
LOG.error('During scheduling instance is already mapped to '
'another cell: %s. This should not happen and is an '
'indication of bigger problems. If you see this you '
'should report it to the nova team. Overwriting '
'the mapping to point at cell %s.',
inst_mapping.cell_mapping.identity, cell.identity,
instance=instance)
inst_mapping.cell_mapping = cell
inst_mapping.save()
return inst_mapping
def _cleanup_build_artifacts(self, context, exc, instances, build_requests,
request_specs, block_device_mappings, tags,
cell_mapping_cache):
for (instance, build_request, request_spec) in six.moves.zip(
instances, build_requests, request_specs):
# Skip placeholders that were buried in cell0 or had their
# build requests deleted by the user before instance create.
if instance is None:
continue
updates = {'vm_state': vm_states.ERROR, 'task_state': None}
cell = cell_mapping_cache[instance.uuid]
with try_target_cell(context, cell) as cctxt:
self._set_vm_state_and_notify(cctxt, instance.uuid,
'build_instances', updates, exc,
request_spec)
# In order to properly clean-up volumes when deleting a server in
# ERROR status with no host, we need to store BDMs in the same
# cell.
if block_device_mappings:
self._create_block_device_mapping(
cell, instance.flavor, instance.uuid,
block_device_mappings)
# Like BDMs, the server tags provided by the user when creating the
# server should be persisted in the same cell so they can be shown
# from the API.
if tags:
with nova_context.target_cell(context, cell) as cctxt:
self._create_tags(cctxt, instance.uuid, tags)
# NOTE(mdbooth): To avoid an incomplete instance record being
# returned by the API, the instance mapping must be
# created after the instance record is complete in
# the cell, and before the build request is
# destroyed.
# TODO(mnaser): The cell mapping should already be populated by
# this point to avoid setting it below here.
inst_mapping = objects.InstanceMapping.get_by_instance_uuid(
context, instance.uuid)
inst_mapping.cell_mapping = cell
inst_mapping.save()
# Be paranoid about artifacts being deleted underneath us.
try:
build_request.destroy()
except exception.BuildRequestNotFound:
pass
try:
request_spec.destroy()
except exception.RequestSpecNotFound:
pass
def _delete_build_request(self, context, build_request, instance, cell,
instance_bdms, instance_tags):
"""Delete a build request after creating the instance in the cell.
This method handles cleaning up the instance in case the build request
is already deleted by the time we try to delete it.
:param context: the context of the request being handled
:type context: nova.context.RequestContext
:param build_request: the build request to delete
:type build_request: nova.objects.BuildRequest
:param instance: the instance created from the build_request
:type instance: nova.objects.Instance
:param cell: the cell in which the instance was created
:type cell: nova.objects.CellMapping
:param instance_bdms: list of block device mappings for the instance
:type instance_bdms: nova.objects.BlockDeviceMappingList
:param instance_tags: list of tags for the instance
:type instance_tags: nova.objects.TagList
:returns: True if the build request was successfully deleted, False if
the build request was already deleted and the instance is now gone.
"""
try:
build_request.destroy()
except exception.BuildRequestNotFound:
# This indicates an instance deletion request has been
# processed, and the build should halt here. Clean up the
# bdm, tags and instance record.
with obj_target_cell(instance, cell) as cctxt:
with compute_utils.notify_about_instance_delete(
self.notifier, cctxt, instance,
source=fields.NotificationSource.CONDUCTOR):
try:
instance.destroy()
except exception.InstanceNotFound:
pass
except exception.ObjectActionError:
# NOTE(melwitt): Instance became scheduled during
# the destroy, "host changed". Refresh and re-destroy.
try:
instance.refresh()
instance.destroy()
except exception.InstanceNotFound:
pass
for bdm in instance_bdms:
with obj_target_cell(bdm, cell):
try:
bdm.destroy()
except exception.ObjectActionError:
pass
if instance_tags:
with try_target_cell(context, cell) as target_ctxt:
try:
objects.TagList.destroy(target_ctxt, instance.uuid)
except exception.InstanceNotFound:
pass
return False
return True
def cache_images(self, context, aggregate, image_ids):
"""Cache a set of images on the set of hosts in an aggregate.
:param context: The RequestContext
:param aggregate: The Aggregate object from the request to constrain
the host list
        :param image_ids: The IDs of the images to cache
"""
# TODO(mriedem): Consider including the list of images in the
# notification payload.
compute_utils.notify_about_aggregate_action(
context, aggregate,
fields.NotificationAction.IMAGE_CACHE,
fields.NotificationPhase.START)
clock = timeutils.StopWatch()
threads = CONF.image_cache.precache_concurrency
fetch_pool = eventlet.GreenPool(size=threads)
hosts_by_cell = {}
cells_by_uuid = {}
# TODO(danms): Make this a much more efficient bulk query
for hostname in aggregate.hosts:
hmap = objects.HostMapping.get_by_host(context, hostname)
cells_by_uuid.setdefault(hmap.cell_mapping.uuid, hmap.cell_mapping)
hosts_by_cell.setdefault(hmap.cell_mapping.uuid, [])
hosts_by_cell[hmap.cell_mapping.uuid].append(hostname)
LOG.info('Preparing to request pre-caching of image(s) %(image_ids)s '
'on %(hosts)i hosts across %(cells)i cells.',
{'image_ids': ','.join(image_ids),
'hosts': len(aggregate.hosts),
'cells': len(hosts_by_cell)})
clock.start()
stats = collections.defaultdict(lambda: (0, 0, 0, 0))
failed_images = collections.defaultdict(int)
down_hosts = set()
host_stats = {
'completed': 0,
'total': len(aggregate.hosts),
}
def host_completed(context, host, result):
for image_id, status in result.items():
cached, existing, error, unsupported = stats[image_id]
if status == 'error':
failed_images[image_id] += 1
error += 1
elif status == 'cached':
cached += 1
elif status == 'existing':
existing += 1
elif status == 'unsupported':
unsupported += 1
stats[image_id] = (cached, existing, error, unsupported)
host_stats['completed'] += 1
compute_utils.notify_about_aggregate_cache(context, aggregate,
host, result,
host_stats['completed'],
host_stats['total'])
def wrap_cache_images(ctxt, host, image_ids):
result = self.compute_rpcapi.cache_images(
ctxt,
host=host,
image_ids=image_ids)
host_completed(context, host, result)
def skipped_host(context, host, image_ids):
result = {image: 'skipped' for image in image_ids}
host_completed(context, host, result)
for cell_uuid, hosts in hosts_by_cell.items():
cell = cells_by_uuid[cell_uuid]
with nova_context.target_cell(context, cell) as target_ctxt:
for host in hosts:
service = objects.Service.get_by_compute_host(target_ctxt,
host)
if not self.servicegroup_api.service_is_up(service):
down_hosts.add(host)
LOG.info(
'Skipping image pre-cache request to compute '
'%(host)r because it is not up',
{'host': host})
skipped_host(target_ctxt, host, image_ids)
continue
fetch_pool.spawn_n(wrap_cache_images, target_ctxt, host,
image_ids)
# Wait until all those things finish
fetch_pool.waitall()
overall_stats = {'cached': 0, 'existing': 0, 'error': 0,
'unsupported': 0}
for cached, existing, error, unsupported in stats.values():
overall_stats['cached'] += cached
overall_stats['existing'] += existing
overall_stats['error'] += error
overall_stats['unsupported'] += unsupported
clock.stop()
LOG.info('Image pre-cache operation for image(s) %(image_ids)s '
'completed in %(time).2f seconds; '
'%(cached)i cached, %(existing)i existing, %(error)i errors, '
'%(unsupported)i unsupported, %(skipped)i skipped (down) '
'hosts',
{'image_ids': ','.join(image_ids),
'time': clock.elapsed(),
'cached': overall_stats['cached'],
'existing': overall_stats['existing'],
'error': overall_stats['error'],
'unsupported': overall_stats['unsupported'],
'skipped': len(down_hosts),
})
# Log error'd images specifically at warning level
for image_id, fails in failed_images.items():
LOG.warning('Image pre-cache operation for image %(image)s '
'failed %(fails)i times',
{'image': image_id,
'fails': fails})
compute_utils.notify_about_aggregate_action(
context, aggregate,
fields.NotificationAction.IMAGE_CACHE,
fields.NotificationPhase.END)
@targets_cell
@wrap_instance_event(prefix='conductor')
def confirm_snapshot_based_resize(self, context, instance, migration):
"""Executes the ConfirmResizeTask
:param context: nova auth request context targeted at the target cell
:param instance: Instance object in "resized" status from the target
cell
:param migration: Migration object from the target cell for the resize
operation expected to have status "confirming"
"""
task = cross_cell_migrate.ConfirmResizeTask(
context, instance, migration, self.notifier, self.compute_rpcapi)
task.execute()
@targets_cell
# NOTE(mriedem): Upon successful completion of RevertResizeTask the
# instance is hard-deleted, along with its instance action record(s), from
# the target cell database so EventReporter hits InstanceActionNotFound on
# __exit__. Pass graceful_exit=True to avoid an ugly traceback.
@wrap_instance_event(prefix='conductor', graceful_exit=True)
def revert_snapshot_based_resize(self, context, instance, migration):
"""Executes the RevertResizeTask
:param context: nova auth request context targeted at the target cell
:param instance: Instance object in "resized" status from the target
cell
:param migration: Migration object from the target cell for the resize
operation expected to have status "reverting"
"""
task = cross_cell_migrate.RevertResizeTask(
context, instance, migration, self.notifier, self.compute_rpcapi)
task.execute()
|
py | 1a3be42084827ada4f3b47a3129adf62739d2836 | import os
import urllib.request
import subprocess
import time
import ssl
import requests
from test_workflow.test_cluster import TestCluster, ClusterCreationException
class LocalTestCluster(TestCluster):
'''
Represents an on-box test cluster. This class downloads a bundle (from a BundleManifest) and runs it as a background process.
'''
def __init__(self, work_dir, bundle_manifest, security_enabled):
self.manifest = bundle_manifest
self.work_dir = os.path.join(work_dir, 'local-test-cluster')
os.makedirs(self.work_dir, exist_ok = True)
self.security_enabled = security_enabled
self.process = None
def create(self):
self.download()
self.stdout = open('stdout.txt', 'w')
self.stderr = open('stderr.txt', 'w')
dir = f'opensearch-{self.manifest.build.version}'
if not self.security_enabled:
self.disable_security(dir)
self.process = subprocess.Popen('./opensearch-tar-install.sh', cwd = dir, shell = True, stdout = self.stdout, stderr = self.stderr)
print(f'Started OpenSearch with PID {self.process.pid}')
self.wait_for_service()
def endpoint(self):
return 'localhost'
def port(self):
return 9200
def destroy(self):
if self.process is None:
print('Local test cluster is not started')
return
print(f'Sending SIGTERM to PID {self.process.pid}')
self.process.terminate()
try:
print('Waiting for process to terminate')
self.process.wait(10)
        except subprocess.TimeoutExpired:
print('Process did not terminate after 10 seconds. Sending SIGKILL')
self.process.kill()
try:
print('Waiting for process to terminate')
self.process.wait(10)
            except subprocess.TimeoutExpired:
print('Process failed to terminate even after SIGKILL')
raise
finally:
print(f'Process terminated with exit code {self.process.returncode}')
self.stdout.close()
self.stderr.close()
self.process = None
def url(self, path=''):
return f'{"https" if self.security_enabled else "http"}://{self.endpoint()}:{self.port()}{path}'
def download(self):
print(f'Creating local test cluster in {self.work_dir}')
os.chdir(self.work_dir)
print(f'Downloading bundle from {self.manifest.build.location}')
urllib.request.urlretrieve(self.manifest.build.location, 'bundle.tgz')
print(f'Downloaded bundle to {os.path.realpath("bundle.tgz")}')
print('Unpacking')
subprocess.check_call('tar -xzf bundle.tgz', shell = True)
print('Unpacked')
def disable_security(self, dir):
subprocess.check_call(f'echo "plugins.security.disabled: true" >> {os.path.join(dir, "config", "opensearch.yml")}', shell = True)
def wait_for_service(self):
print('Waiting for service to become available')
url = self.url('/_cluster/health')
for attempt in range(10):
try:
print(f'Pinging {url} attempt {attempt}')
response = requests.get(url, verify = False, auth = ('admin', 'admin'))
print(f'{response.status_code}: {response.text}')
if response.status_code == 200 and '"status":"green"' in response.text:
print('Cluster is green')
return
except requests.exceptions.ConnectionError:
print(f'Service not available yet')
time.sleep(10)
raise ClusterCreationException('Cluster is not green after 10 attempts')
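# A minimal usage sketch (assuming a BundleManifest for an existing build is
# available as `manifest`; the values below are illustrative, not part of this
# module):
#
#   cluster = LocalTestCluster('/tmp/integ-tests', manifest, security_enabled=False)
#   try:
#       cluster.create()                        # download, unpack and start OpenSearch
#       print(cluster.url('/_cluster/health'))  # -> http://localhost:9200/_cluster/health
#   finally:
#       cluster.destroy()                       # SIGTERM, then SIGKILL if it hangs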
|
py | 1a3be421627fe8ed8de9ed385b686ded0706872e | from django.contrib.auth.models import User
from django.urls import reverse
from django.db import models
from django.utils import timezone
from django.utils.safestring import mark_safe
class Issue(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
submitted = models.DateTimeField(auto_now_add=True)
edited = models.DateTimeField(auto_now=True, blank=True, null=True)
subject = models.CharField(max_length=40)
description = models.TextField(help_text=mark_safe('<a href="https://daringfireball.net/projects/markdown/syntax" target="_blank">Markdown syntax</a> enabled'))
public = models.BooleanField(default=True)
closed = models.BooleanField(default=False)
latest_comment = models.DateTimeField(default=timezone.now)
github_issue_url = models.URLField(null=True, blank=True)
def __str__(self):
return self.subject
def get_absolute_url(self):
return reverse('feedback:issue_detail', kwargs={'pk': self.pk})
def comment_count(self):
return Discussion.objects.filter(feedback=self).count()
def get_status_html(self):
if self.closed:
if self.github_issue_url:
return mark_safe('<a href="{}" target="_blank" class="btn badge bg-success"><i class="fas fa-check"></i> Implemented</a>'.format(
self.github_issue_url,
))
else:
return mark_safe('<span class="badge bg-secondary">Closed</span>')
else:
if self.github_issue_url:
return mark_safe('<a href="{}" target="_blank" class="btn badge bg-info">Accepted - Tracked on <i class="fab fa-github"></i> GitHub</a>'.format(
self.github_issue_url,
))
else:
return mark_safe('<span class="badge bg-secondary">Under Discussion</span>')
class Meta:
ordering = ['-latest_comment']
class Discussion(models.Model):
feedback = models.ForeignKey(Issue, on_delete=models.CASCADE)
user = models.ForeignKey(User, on_delete=models.CASCADE)
timestamp = models.DateTimeField(auto_now_add=True)
edited = models.DateTimeField(auto_now=True, blank=True, null=True)
comment = models.TextField(help_text=mark_safe('<a href="https://daringfireball.net/projects/markdown/syntax" target="_blank">Markdown syntax</a> enabled'))
def __str__(self):
return str(self.feedback) + ' ' + str(self.timestamp)
def save(self, *args, **kwargs):
super(Discussion, self).save(*args, **kwargs)
        # If this is the newest comment, update the parent issue's latest_comment
        # timestamp so the issue list (ordered by '-latest_comment') stays current.
if self.feedback and self.feedback.latest_comment < self.timestamp:
self.feedback.latest_comment = self.timestamp
self.feedback.save()
class Meta:
ordering = ('timestamp',)
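# Illustrative sketch of how the two models interact (users and values below are
# made up):
#
#   issue = Issue.objects.create(user=some_user, subject='Bug', description='...')
#   Discussion.objects.create(feedback=issue, user=some_user, comment='First!')
#
# Discussion.save() bumps issue.latest_comment, so the issue rises to the top of
# Issue.objects.all(), which is ordered by '-latest_comment'.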
|
py | 1a3be78deb5a5fb12a485af2c5314fc309c03944 | # Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import sys
from botocore.exceptions import ClientError
from c7n.exceptions import PolicyValidationError
from c7n.executor import MainThreadExecutor
from c7n.resources.ebs import (
CopyInstanceTags,
EncryptInstanceVolumes,
CopySnapshot,
Delete,
QueryParser
)
from .common import BaseTest, TestConfig as Config
class SnapshotQueryParse(BaseTest):
def test_query(self):
qfilters = [
{'Name': 'tag:Name', 'Values': ['Snapshot1']},
{'Name': 'status', 'Values': ['completed']}]
self.assertEqual(qfilters, QueryParser.parse(qfilters))
def test_invalid_query(self):
self.assertRaises(
PolicyValidationError, QueryParser.parse, {})
self.assertRaises(
PolicyValidationError, QueryParser.parse, [None])
self.assertRaises(
PolicyValidationError, QueryParser.parse, [{'X': 1}])
self.assertRaises(
PolicyValidationError, QueryParser.parse, [
{'Name': 'status', 'Values': 'completed'}])
self.assertRaises(
PolicyValidationError, QueryParser.parse, [
{'Name': 'status', 'Values': ['Completed']}])
self.assertRaises(
PolicyValidationError, QueryParser.parse, [
{'Name': 'snapshot-id', 'Values': [1]}])
class SnapshotAccessTest(BaseTest):
def test_snapshot_access(self):
# pre conditions, 2 snapshots one shared to a separate account, and one
# shared publicly. 2 non matching volumes, one not shared, one shared
# explicitly to its own account.
self.patch(CopySnapshot, "executor_factory", MainThreadExecutor)
factory = self.replay_flight_data("test_ebs_cross_account")
p = self.load_policy(
{
"name": "snap-copy",
"resource": "ebs-snapshot",
"filters": ["cross-account"],
},
config=Config.empty(),
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 2)
self.assertEqual(
{r["SnapshotId"]: r["c7n:CrossAccountViolations"] for r in resources},
{"snap-7f9496cf": ["619193117841"], "snap-af0eb71b": ["all"]},
)
class SnapshotDetachTest(BaseTest):
def test_volume_detach(self):
factory = self.replay_flight_data('test_ebs_detach')
p = self.load_policy(
{
'name': 'volume-detach',
'resource': 'ebs',
'filters': [{'VolumeId': 'vol-0850cf7c8e949c318'}],
'actions': [
{
'type': 'detach'
}
]
}, config=Config.empty(), session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
client = factory(region="us-east-1").client('ec2')
volumelist = []
volumelist.append(resources[0]['VolumeId'])
response = client.describe_volumes(VolumeIds=volumelist)
for resp in response['Volumes']:
for attachment in resp['Attachments']:
self.assertTrue(attachment['State'] == "detached" or
attachment['State'] == "detaching")
class SnapshotCopyTest(BaseTest):
def test_snapshot_copy(self):
self.patch(CopySnapshot, "executor_factory", MainThreadExecutor)
self.change_environment(AWS_DEFAULT_REGION="us-west-2")
factory = self.replay_flight_data("test_ebs_snapshot_copy")
p = self.load_policy(
{
"name": "snap-copy",
"resource": "ebs-snapshot",
"filters": [{"tag:ASV": "RoadKill"}],
"actions": [
{
"type": "copy",
"target_region": "us-east-1",
"target_key": "82645407-2faa-4d93-be71-7d6a8d59a5fc",
}
],
},
Config.empty(region="us-west-2"),
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = factory(region="us-east-1").client("ec2")
tags = client.describe_tags(
Filters=[
{"Name": "resource-id", "Values": [resources[0]["c7n:CopiedSnapshot"]]}
]
)[
"Tags"
]
tags = {t["Key"]: t["Value"] for t in tags}
self.assertEqual(tags["ASV"], "RoadKill")
class SnapshotAmiSnapshotTest(BaseTest):
def test_snapshot_ami_snapshot_filter(self):
self.patch(CopySnapshot, "executor_factory", MainThreadExecutor)
# DEFAULT_REGION needs to be set to west for recording
factory = self.replay_flight_data("test_ebs_ami_snapshot_filter")
# first case should return only resources that are ami snapshots
p = self.load_policy(
{
"name": "ami-snap-filter",
"resource": "ebs-snapshot",
"filters": [{"type": "skip-ami-snapshots", "value": False}],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 3)
# second case should return resources that are NOT ami snapshots
policy = self.load_policy(
{
"name": "non-ami-snap-filter",
"resource": "ebs-snapshot",
"filters": [{"type": "skip-ami-snapshots", "value": True}],
},
session_factory=factory,
)
resources = policy.run()
self.assertEqual(len(resources), 2)
class SnapshotTrimTest(BaseTest):
def test_snapshot_trim(self):
factory = self.replay_flight_data("test_ebs_snapshot_delete")
p = self.load_policy(
{
"name": "snapshot-trim",
"resource": "ebs-snapshot",
"filters": [{"tag:InstanceId": "not-null"}],
"actions": ["delete"],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
class AttachedInstanceTest(BaseTest):
def test_ebs_instance_filter(self):
factory = self.replay_flight_data("test_ebs_instance_filter")
p = self.load_policy(
{
"name": "attached-instance-test",
"resource": "ebs",
"filters": [
{"type": "instance", "key": "tag:Name", "value": "CompiledLambda"}
],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
class ResizeTest(BaseTest):
def test_resize_action(self):
factory = self.replay_flight_data("test_ebs_modifyable_action")
client = factory().client("ec2")
# Change a volume from 32 gb gp2 and 100 iops (sized based) to
# 64gb and 500 iops.
vol_id = "vol-0073dcd216489ea1b"
p = self.load_policy(
{
"name": "resizable",
"resource": "ebs",
"filters": ["modifyable", {"VolumeId": vol_id}],
"actions": [
{
"type": "modify",
"volume-type": "io1",
"size-percent": 200,
"iops-percent": 500,
}
],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(resources[0]["Iops"], 100)
self.assertEqual(resources[0]["Size"], 32)
vol = client.describe_volumes(VolumeIds=[vol_id])["Volumes"][0]
self.assertEqual(vol["Iops"], 500)
self.assertEqual(vol["Size"], 64)
def test_resize_filter(self):
# precondition, 6 volumes, 4 not modifyable.
factory = self.replay_flight_data("test_ebs_modifyable_filter")
output = self.capture_logging("custodian.filters", level=logging.DEBUG)
p = self.load_policy(
{"name": "resizable", "resource": "ebs", "filters": ["modifyable"]},
session_factory=factory,
)
resources = p.run()
self.assertEqual(
{r["VolumeId"] for r in resources},
set(("vol-0073dcd216489ea1b", "vol-0e4cba7adc4764f79")),
)
# normalizing on str/unicode repr output between versions.. punt
if sys.version_info[0] > 2:
return
self.assertEqual(
output.getvalue().strip(),
(
"filtered 4 of 6 volumes due to [(u'instance-type', 2), "
"(u'vol-mutation', 1), (u'vol-type', 1)]"
),
)
class CopyInstanceTagsTest(BaseTest):
def test_copy_instance_tags(self):
# More a functional/coverage test then a unit test.
self.patch(CopyInstanceTags, "executor_factory", MainThreadExecutor)
factory = self.replay_flight_data("test_ebs_copy_instance_tags")
volume_id = "vol-2b047792"
results = factory().client("ec2").describe_tags(
Filters=[{"Name": "resource-id", "Values": [volume_id]}]
)[
"Tags"
]
tags = {t["Key"]: t["Value"] for t in results}
self.assertEqual(tags, {})
policy = self.load_policy(
{
"name": "test-copy-instance-tags",
"resource": "ebs",
"actions": [{"type": "copy-instance-tags", "tags": ["Name"]}],
},
config={"region": "us-west-2"},
session_factory=factory,
)
policy.run()
results = factory().client("ec2").describe_tags(
Filters=[{"Name": "resource-id", "Values": [volume_id]}]
)[
"Tags"
]
tags = {t["Key"]: t["Value"] for t in results}
self.assertEqual(tags["Name"], "CompileLambda")
class VolumeSnapshotTest(BaseTest):
def test_volume_snapshot(self):
factory = self.replay_flight_data("test_ebs_snapshot")
policy = self.load_policy(
{
"name": "test-ebs-snapshot",
"resource": "ebs",
"filters": [{"VolumeId": "vol-01adbb6a4f175941d"}],
"actions": ["snapshot"],
},
session_factory=factory,
)
policy.run()
snapshot_data = factory().client("ec2").describe_snapshots(
Filters=[{"Name": "volume-id", "Values": ["vol-01adbb6a4f175941d"]}]
)
self.assertEqual(len(snapshot_data["Snapshots"]), 1)
class VolumeDeleteTest(BaseTest):
def test_volume_delete_force(self):
self.patch(Delete, "executor_factory", MainThreadExecutor)
factory = self.replay_flight_data("test_ebs_force_delete")
policy = self.load_policy(
{
"name": "test-ebs",
"resource": "ebs",
"filters": [{"VolumeId": "vol-d0790258"}],
"actions": [{"type": "delete", "force": True}],
},
session_factory=factory,
)
resources = policy.run()
try:
factory().client("ec2").describe_volumes(
VolumeIds=[resources[0]["VolumeId"]]
)
except ClientError as e:
self.assertEqual(e.response["Error"]["Code"], "InvalidVolume.NotFound")
else:
self.fail("Volume still exists")
class EncryptExtantVolumesTest(BaseTest):
def test_encrypt_volumes(self):
self.patch(EncryptInstanceVolumes, "executor_factory", MainThreadExecutor)
session_factory = self.replay_flight_data("test_encrypt_volumes")
policy = self.load_policy(
{
"name": "ebs-remediate-attached",
"resource": "ebs",
"filters": [
{"Encrypted": False}, {"VolumeId": "vol-0f53c81b92b4ecfce"}
],
"actions": [
{
"type": "encrypt-instance-volumes",
"delay": 0.001,
"key": "alias/encryptebs",
}
],
},
session_factory=session_factory,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
for r in resources:
volumes = session_factory().client("ec2").describe_volumes(
Filters=[
{
"Name": "attachment.instance-id",
"Values": [r["Attachments"][0]["InstanceId"]],
}
]
)
for v in volumes["Volumes"]:
self.assertTrue(v["Attachments"][0]["DeleteOnTermination"])
self.assertTrue(v["Encrypted"])
if "Tags" in v:
self.assertNotIn(
"maid-crypt-remediation", [i["Key"] for i in v["Tags"]]
)
self.assertNotIn(
"maid-origin-volume", [i["Key"] for i in v["Tags"]]
)
self.assertNotIn(
"maid-instance-device", [i["Key"] for i in v["Tags"]]
)
class TestKmsAlias(BaseTest):
def test_ebs_kms_alias(self):
session_factory = self.replay_flight_data("test_ebs_aws_managed_kms_keys")
p = self.load_policy(
{
"name": "ebs-aws-managed-kms-keys-filters",
"resource": "ebs",
"filters": [
{
"type": "kms-alias",
"key": "AliasName",
"value": "^(alias/aws/)",
"op": "regex",
}
],
},
config={"region": "us-west-2"},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["VolumeId"], "vol-14a3cd9d")
class EbsFaultToleranceTest(BaseTest):
def test_ebs_fault_tolerant(self):
session = self.replay_flight_data("test_ebs_fault_tolerant")
policy = self.load_policy(
{
"name": "ebs-fault-tolerant",
"resource": "ebs",
"filters": ["fault-tolerant"],
},
session_factory=session,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["VolumeId"], "vol-c5eaa459")
def test_ebs_non_fault_tolerant(self):
session = self.replay_flight_data("test_ebs_non_fault_tolerant")
policy = self.load_policy(
{
"name": "ebs-non-fault-tolerant",
"resource": "ebs",
"filters": [{"type": "fault-tolerant", "tolerant": False}],
},
session_factory=session,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["VolumeId"], "vol-abdb8d37")
class PiopsMetricsFilterTest(BaseTest):
def test_ebs_metrics_percent_filter(self):
session = self.replay_flight_data("test_ebs_metrics_percent_filter")
policy = self.load_policy(
{
"name": "ebs-unused-piops",
"resource": "ebs",
"filters": [
{
"type": "metrics",
"name": "VolumeConsumedReadWriteOps",
"op": "lt",
"value": 50,
"statistics": "Maximum",
"days": 1,
"percent-attr": "Iops",
}
],
},
session_factory=session,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
class HealthEventsFilterTest(BaseTest):
def test_ebs_health_events_filter(self):
session_factory = self.replay_flight_data("test_ebs_health_events_filter")
policy = self.load_policy(
{
"name": "ebs-health-events-filter",
"resource": "ebs",
"filters": [{"type": "health-event", "types": ["AWS_EBS_VOLUME_LOST"]}],
},
session_factory=session_factory,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
for r in resources:
            self.assertTrue("c7n:HealthEvent" in r)
            self.assertTrue(
                all("Description" in e for e in r["c7n:HealthEvent"])
            )
|
py | 1a3be8374fe129631d3868361047038687828a13 | import sys
import os
from collections import defaultdict
# For each gene group listed in the input CSV, build the output directory tree
# (via os.system) and print the shell commands that run the g:Profiler GO
# enrichment for that group and copy the result into every cell-type folder
# containing the group.
filenames=[]
filenames.append('deep_ana0.08.csv')
dir1='GO_analysis_gprofiler_results'
os.system('mkdir '+dir1)
for filename in filenames:
file_lines=open(filename).readlines()
cell_type_group_dict={}
group_cell_type_dict=defaultdict(lambda:[])
dir2=dir1+'/'+filename
os.system('mkdir '+dir2)
dir3=dir2+'/all_groups'
os.system('mkdir '+dir3)
for line in file_lines[1:17]:
line = line.replace('\n','')
splits = line.split(',')
cell_type = splits[0]
cell_type_dir=dir2+'/'+cell_type
os.system('mkdir '+cell_type_dir)
groups = splits[2:]
cell_type_group_dict[cell_type]=groups
for gp in groups:
group_cell_type_dict[gp].append(cell_type)
for line in file_lines[18:]:
line = line.replace('\n','')
splits = line.split(',')
group = splits[0]
count = int(splits[1])
genes = splits[2:]
if group.startswith('TF'):
continue
head=["query", "significant", "p_value", "T", "Q", "Q&T", "precision", "recall", "term_id","domain", "group", "description", "depth", "intersection", "evcodes"]
out_file=dir3+'/'+group+'.txt'
cmd = 'printf "'+'\\t'.join(head)+'\\n" > '+out_file
print cmd
cmd = 'python ~/gprofiler-official-0.2.3/gprofiler.py -o mmusculus "'+ ' '.join(genes) + '" >> '+out_file
print cmd
for ct in group_cell_type_dict[group]:
cell_type_dir=dir2+'/'+ct
cmd = 'cp '+out_file+' '+cell_type_dir
print cmd
|
py | 1a3be8419f48949c0ab917d5c48155e49851bfa9 | # This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import pytest
from indico.modules.users import User
pytest_plugins = 'indico.modules.rb.testing.fixtures'
@pytest.mark.parametrize('bulk_possible', (True, False))
def test_managed_rooms(monkeypatch, bulk_possible, create_user, create_room, dummy_user):
from indico.modules.rb.operations.rooms import get_managed_room_ids
monkeypatch.setattr(User, 'can_get_all_multipass_groups', bulk_possible)
users = {
'x': {'first_name': 'Regular', 'last_name': 'User'},
'y': {'first_name': 'Room', 'last_name': 'Owner'},
'z': {'first_name': 'ManyRooms', 'last_name': 'Owner'}
}
rooms = {
'a': {'verbose_name': 'Red room', 'owner': 'z'},
'b': {'verbose_name': 'Blue room', 'owner': 'y'},
'c': {'verbose_name': 'Green room', 'owner': 'y'}
}
user_map = {key: create_user(id_, **data) for id_, (key, data) in enumerate(users.iteritems(), 1)}
room_map = {}
for id_, (key, data) in enumerate(rooms.iteritems(), 1):
data['id'] = id_
data['owner'] = user_map[data['owner']]
room_map[key] = create_room(**data)
room_map['a'].update_principal(user_map['y'], full_access=True)
for key, user in user_map.iteritems():
room_ids = [room.id for room in room_map.values() if (room.owner == user_map[key] or room.can_manage(user))]
assert get_managed_room_ids(user) == set(room_ids)
|
py | 1a3be874586c54e80097f3991b60dc4e229b1674 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import scipy as sp
import scanpy as sc
def pearson_residuals(counts, theta, clipping=True):
'''Computes analytical residuals for NB model with a fixed theta, clipping outlier residuals to sqrt(N)'''
counts_sum0 = np.sum(counts, axis=0, keepdims=True)
counts_sum1 = np.sum(counts, axis=1, keepdims=True)
counts_sum = np.sum(counts)
#get residuals
mu = counts_sum1 @ counts_sum0 / counts_sum
z = (counts - mu) / np.sqrt(mu + mu**2/theta)
#clip to sqrt(n)
if clipping:
n = counts.shape[0]
z[z > np.sqrt(n)] = np.sqrt(n)
z[z < -np.sqrt(n)] = -np.sqrt(n)
return z
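# Illustrative example on a small dense array (values are made up):
#
#   counts = np.array([[0, 3, 1], [5, 1, 2]])
#   z = pearson_residuals(counts, theta=100)   # z has the same shape as counts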
def read_dataset(adata, transpose=False, copy=False):
if isinstance(adata, sc.AnnData):
if copy:
adata = adata.copy()
elif isinstance(adata, str):
adata = sc.read(adata)
else:
raise NotImplementedError
norm_error = 'Make sure that the dataset (adata.X) contains unnormalized count data.'
assert 'n_count' not in adata.obs, norm_error
if adata.X.size < 50e6: # check if adata.X is integer only if array is small
if sp.sparse.issparse(adata.X):
assert (adata.X.astype(int) != adata.X).nnz == 0, norm_error
else:
assert np.all(adata.X.astype(int) == adata.X), norm_error
if transpose: adata = adata.transpose()
print('### Autoencoder: Successfully preprocessed {} genes and {} cells.'.format(adata.n_vars, adata.n_obs))
return adata
def normalize_training(adata, filter_min_counts=True, size_factors=True, normalize_input=True, logtrans_input=True):
if filter_min_counts:
sc.pp.filter_genes(adata, min_counts=1)
sc.pp.filter_cells(adata, min_counts=1)
if size_factors or normalize_input or logtrans_input:
adata.raw = adata.copy()
else:
adata.raw = adata
if size_factors:
sc.pp.normalize_per_cell(adata)
adata.obs['size_factors'] = adata.obs.n_counts / np.median(adata.obs.n_counts)
else:
adata.obs['size_factors'] = 1.0
if logtrans_input:
sc.pp.log1p(adata)
if normalize_input:
sc.pp.scale(adata)
return adata
def normalize_testing(adata, training_median_n_counts, training_mean, training_std, filter_min_counts=True, size_factors=True, normalize_input=True, logtrans_input=True):
if filter_min_counts:
sc.pp.filter_genes(adata, min_counts=1)
sc.pp.filter_cells(adata, min_counts=1)
if size_factors or normalize_input or logtrans_input:
adata.raw = adata.copy()
else:
adata.raw = adata
if size_factors:
sc.pp.normalize_per_cell(adata)
adata.obs['size_factors'] = adata.obs.n_counts / training_median_n_counts
else:
adata.obs['size_factors'] = 1.0
if logtrans_input:
sc.pp.log1p(adata)
if normalize_input:
adata.X = (adata.X - np.array(training_mean)) / np.array(training_std)
return adata
|
py | 1a3bea6a2afaecddb52274d73eaddde6e36e0fd3 | import gzip
import os
import shutil
from pathlib import Path
from stat import S_IXUSR, S_IWUSR, S_IRUSR
from .._sync_http import client
def download(url: str, to: Path) -> None:
to.parent.mkdir(parents=True, exist_ok=True)
tar = to.with_suffix('.gz.tmp')
client.download(url, tar)
    # decompress the temporary .gz download into the final destination path
with gzip.open(tar, 'rb') as f_in:
with open(to, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
# chmod +x
os.chmod(to, S_IXUSR | S_IWUSR | S_IRUSR)
# remove temporary files
os.remove(tar)
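# Minimal usage sketch (the URL and target path are placeholders, not real
# artifacts):
#
#   download('https://example.com/tools/some-binary.gz', Path('bin/some-binary'))
#
# This fetches the gzipped binary to a .gz.tmp file, decompresses it into the
# target path, marks it executable (u+rwx) and removes the temporary download.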
|
py | 1a3beb92b065d95cfbcd881ec7920971f6ddfd30 | import contextlib
import io
from elftools.elf.elffile import ELFFile
from elftools.dwarf.die import DIE
from elftools.dwarf.die import AttributeValue
from elftools.dwarf.descriptions import describe_DWARF_expr, set_global_machine_arch
from elftools.dwarf.locationlists import LocationEntry, LocationExpr, LocationParser
class DebugInfo:
def debug_info(self, show=None, **kwargs):
"""Print a summary of the debugging info for the compiled code.
This is the data that debuggers use to make debugging a program
comprehensible. It includes variable and function names, types, file
names, line numbers, etc.
Currently only `DWARF4 <https://dwarfstd.org/doc/DWARF4.pdf>`_ is
supported, which is the standard on Linux systems.
In order for debugging information to present, the code must be
compiled with :code:`-g`.
Args:
show: What to show -- a function name. Defaults to ``None`` which will display all the debugging info.
Returns:
:code:`str`: String rendering the DWARF data for the file or function. This can be very long.
"""
self.show = show
with self.DWARFInfo() as dwarfinfo:
if dwarfinfo is None:
return f"No debugging data in {self.lib}"
for CU in dwarfinfo.iter_CUs():
top_DIE = CU.get_top_DIE()
return DWARFRenderer(top_DIE, show).render()
return f"No Compilation units in {self.lib}."
def stack_frame(self, show, **kwargs):
"""Print the stack frame layout for a function.
This returns a description of where each variable and argument
resides on the stack or in registers.
For instance:
.. doctest::
>>> from cfiddle import *
>>> sample = code(r'''
... extern "C"
... int foo(int a) {
... register int sum = 0;
... for(int i = 0; i < 10; i++) {
... sum += i;
... }
... return sum;
... }
... ''')
>>> stack_frame = build(sample)[0].stack_frame("foo")
>>> print(stack_frame) # doctest: +SKIP
function foo
a: (DW_OP_fbreg: -44)
sum: (DW_OP_reg3 (rbx))
i: (DW_OP_fbreg: -28)
The format is potentially complicated (the DWARF format is Turing
        complete!), but most entries are easy to understand.
        The example above shows that :code:`a` is stored at -44 bytes relative
to the frame base register and :code:`sum` is a register.
This is a work in progress. Here's the `Dwarf4 spec
<https://dwarfstd.org/doc/DWARF4.pdf>`_ and the source code for
`pyelftools <https://github.com/eliben/pyelftools>`_, which is reasonably well documented.
Pull requests welcome :-).
Args:
show: Function to extract the frame layout from.
Returns:
:code:`str`: A description of the layout
"""
output = io.StringIO()
current_function = None
self._set_machine_architecture()
def emit(s):
if current_function == show:
output.write(s)
with self.DWARFInfo() as dwarfinfo:
loc_parser = self._build_location_parser(dwarfinfo)
for CU in dwarfinfo.iter_CUs():
for DIE in CU.iter_DIEs():
if DIE.tag == "DW_TAG_subprogram":
current_function = self._extract_name(DIE)
emit(self._render_function_name(DIE))
elif DIE.tag in ["DW_TAG_formal_parameter", "DW_TAG_variable"]:
if current_function == show:
emit(self._render_variable_location(DIE, CU, dwarfinfo, loc_parser))
return output.getvalue()
def _render_variable_location(self, DIE, CU, dwarfinfo, loc_parser):
if "DW_AT_name" in DIE.attributes:
name = DIE.attributes['DW_AT_name'].value.decode()
else:
name = "<unnamed>"
if "DW_AT_location" not in DIE.attributes:
return f"{name} has no location\n"
else:
loc = loc_parser.parse_from_attribute(DIE.attributes["DW_AT_location"], CU['version'])
if isinstance(loc, LocationExpr):
offset = describe_DWARF_expr(loc.loc_expr, dwarfinfo.structs, CU.cu_offset)
return f" {name}: {offset}\n"
else:
return f" {name}: <not a location>\n"
def _set_machine_architecture(self):
with self.ELFFile() as elffile: # This is required for the descriptions module to correctly decode
# register names contained in DWARF expressions.
set_global_machine_arch(elffile.get_machine_arch())
def _render_function_name(self, DIE):
n = self._extract_name(DIE)
if n is None:
return f"function <anon>\n"
else:
return f"function {n}\n"
def _extract_name(self, DIE):
if "DW_AT_name" in DIE.attributes:
return DIE.attributes['DW_AT_name'].value.decode()
else:
return None
def _build_location_parser(self, dwarfinfo):
location_lists = dwarfinfo.location_lists()
return LocationParser(location_lists)
@contextlib.contextmanager
def DWARFInfo(self):
"""Context manager for the raw :code:`DWARFInfo` object for the compiled code.
Returns:
:code:`DWARFInfo`: :code:`DWARFInfo` object created by `pyelftools <https://github.com/eliben/pyelftools>`_.
"""
try:
with self.ELFFile() as elffile:
if not elffile.has_dwarf_info():
yield None
else:
# we need to yield because the elftools hasn't finished parsing yet
yield elffile.get_dwarf_info()
finally:
pass
@contextlib.contextmanager
def ELFFile(self):
"""Context manager for the raw :code:`ELFFile` object for the compiled code.
Returns:
:code:`ELFFile`: :code:`ELFFile` object created by `pyelftools <https://github.com/eliben/pyelftools>`_.
"""
try:
with open(self.lib, 'rb') as f:
yield ELFFile(f)
finally:
pass
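# Sketch of typical use, mirroring the doctest above: DebugInfo is mixed into the
# objects returned by cfiddle's build(), so (for code compiled with -g):
#
#   exe = build(sample)[0]
#   print(exe.debug_info(show="foo"))       # DWARF DIE tree for function foo
#   with exe.ELFFile() as elf:
#       print(elf.get_machine_arch())       # e.g. 'x64'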
class DWARFRenderer:
def __init__(self, die, show):
self.root = die
self.show = show
if self.show is None:
self.printing = 1
else:
self.printing = 0
self.output = io.StringIO()
self.indent = 0
def render(self):
self._die_info_rec(self.root)
return self.output.getvalue()
def _die_info_rec(self, die):
printing_increment = 0
if die.tag == "DW_TAG_subprogram":
if self.show == self._get_die_name(die):
printing_increment = 1
self.printing += printing_increment
self._output_element(die)
self._push_indent()
for key, attribute in die.attributes.items():
self._output_element(attribute)
for child in die.iter_children():
self._die_info_rec(child)
self._pop_indent()
self.printing -= printing_increment
def _get_die_name(self, die):
if "DW_AT_name" in die.attributes:
return die.attributes["DW_AT_name"].value.decode()
else:
return "<unknown name>"
def _push_indent(self):
self.indent += 1
def _pop_indent(self):
self.indent -= 1
def _output_element(self, e):
if self.printing > 0:
indent = " " * self.indent
self.output.write(f"[{e.offset:4}] {indent}{self._render_element(e)}\n")
def _render_element(self, e):
if isinstance(e, AttributeValue) :
return f"{e.name} = {e.value}"
elif isinstance(e, DIE) :
return f"{e.tag}"
|
py | 1a3bec87c370ece78d6381a1fe247955d34720b5 | import copy
from io import StringIO
import iso8583
import iso8583.specs
import pytest
from iso8583.tools import _wrap_bytes_repr, _wrap_str_repr
spec = copy.deepcopy(iso8583.specs.default)
def test_pp(capsys):
# fmt: off
spec["h"]["max_len"] = 6
spec["h"]["len_type"] = 0
doc_dec = {}
iso8583.pp(doc_dec, spec)
captured = capsys.readouterr()
r = captured.out.split("\n")
assert r[0] == ""
assert len(r) == 1
doc_dec["h"] = "header"
doc_dec["t"] = "0200"
doc_dec["2"] = "12345678"
doc_dec["44"] = "123"
doc_dec["123"] = "123"
_, doc_enc = iso8583.encode(doc_dec, spec)
iso8583.pp(doc_dec, spec)
captured = capsys.readouterr()
r = captured.out.split("\n")
assert r[0] == "h Message Header : 'header'"
assert r[1] == "t Message Type : '0200'"
assert r[2] == "p Bitmap, Primary : 'C000000000100000'"
assert r[3] == "1 Bitmap, Secondary : '0000000000000020'"
assert r[4] == "2 Primary Account Number (PAN) : '12345678'"
assert r[5] == "44 Additional Response Data : '123'"
assert r[6] == "123 Reserved for Private Use : '123'"
assert r[7] == ""
assert len(r) == 8
iso8583.pp(doc_enc, spec)
captured = capsys.readouterr()
r = captured.out.split("\n")
assert r[0] == "h Message Header : b'header'"
assert r[1] == "t Message Type : b'0200'"
assert r[2] == "p Bitmap, Primary : b'\\xc0\\x00\\x00\\x00\\x00\\x10\\x00\\x00'"
assert r[3] == "1 Bitmap, Secondary : b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00 '"
assert r[4] == "2 Primary Account Number (PAN) : b'08' b'12345678'"
assert r[5] == "44 Additional Response Data : b'03' b'123'"
assert r[6] == "123 Reserved for Private Use : b'003' b'123'"
assert r[7] == ""
assert len(r) == 8
# fmt: on
def test_pp_variable_header(capsys):
# fmt: off
spec["h"]["max_len"] = 6
spec["h"]["len_type"] = 2
doc_dec = {}
doc_dec["h"] = "header"
doc_dec["t"] = "0200"
doc_dec["2"] = "12345678"
doc_dec["44"] = "123"
doc_dec["123"] = "123"
_, doc_enc = iso8583.encode(doc_dec, spec)
iso8583.pp(doc_dec, spec)
captured = capsys.readouterr()
r = captured.out.split("\n")
assert r[0] == "h Message Header : 'header'"
assert r[1] == "t Message Type : '0200'"
assert r[2] == "p Bitmap, Primary : 'C000000000100000'"
assert r[3] == "1 Bitmap, Secondary : '0000000000000020'"
assert r[4] == "2 Primary Account Number (PAN) : '12345678'"
assert r[5] == "44 Additional Response Data : '123'"
assert r[6] == "123 Reserved for Private Use : '123'"
assert r[7] == ""
assert len(r) == 8
iso8583.pp(doc_enc, spec)
captured = capsys.readouterr()
r = captured.out.split("\n")
assert r[0] == "h Message Header : b'06' b'header'"
assert r[1] == "t Message Type : b'0200'"
assert r[2] == "p Bitmap, Primary : b'\\xc0\\x00\\x00\\x00\\x00\\x10\\x00\\x00'"
assert r[3] == "1 Bitmap, Secondary : b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00 '"
assert r[4] == "2 Primary Account Number (PAN) : b'08' b'12345678'"
assert r[5] == "44 Additional Response Data : b'03' b'123'"
assert r[6] == "123 Reserved for Private Use : b'003' b'123'"
assert r[7] == ""
assert len(r) == 8
doc_dec["h"] = ""
_, doc_enc = iso8583.encode(doc_dec, spec)
iso8583.pp(doc_dec, spec)
captured = capsys.readouterr()
r = captured.out.split("\n")
assert r[0] == "h Message Header : ''"
assert r[1] == "t Message Type : '0200'"
assert r[2] == "p Bitmap, Primary : 'C000000000100000'"
assert r[3] == "1 Bitmap, Secondary : '0000000000000020'"
assert r[4] == "2 Primary Account Number (PAN) : '12345678'"
assert r[5] == "44 Additional Response Data : '123'"
assert r[6] == "123 Reserved for Private Use : '123'"
assert r[7] == ""
assert len(r) == 8
iso8583.pp(doc_enc, spec)
captured = capsys.readouterr()
r = captured.out.split("\n")
assert r[0] == "h Message Header : b'00' b''"
assert r[1] == "t Message Type : b'0200'"
assert r[2] == "p Bitmap, Primary : b'\\xc0\\x00\\x00\\x00\\x00\\x10\\x00\\x00'"
assert r[3] == "1 Bitmap, Secondary : b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00 '"
assert r[4] == "2 Primary Account Number (PAN) : b'08' b'12345678'"
assert r[5] == "44 Additional Response Data : b'03' b'123'"
assert r[6] == "123 Reserved for Private Use : b'003' b'123'"
assert r[7] == ""
assert len(r) == 8
# fmt: on
def test_pp_stream():
# fmt: off
spec["h"]["max_len"] = 6
spec["h"]["len_type"] = 0
doc_dec = {}
doc_dec["h"] = "header"
doc_dec["t"] = "0200"
doc_dec["2"] = "12345678"
doc_dec["44"] = "123"
doc_dec["123"] = "123"
_, doc_enc = iso8583.encode(doc_dec, spec)
sio = StringIO()
iso8583.pp(doc_dec, spec, stream=sio)
r = sio.getvalue().split("\n")
assert r[0] == "h Message Header : 'header'"
assert r[1] == "t Message Type : '0200'"
assert r[2] == "p Bitmap, Primary : 'C000000000100000'"
assert r[3] == "1 Bitmap, Secondary : '0000000000000020'"
assert r[4] == "2 Primary Account Number (PAN) : '12345678'"
assert r[5] == "44 Additional Response Data : '123'"
assert r[6] == "123 Reserved for Private Use : '123'"
assert r[7] == ""
assert len(r) == 8
sio = StringIO()
iso8583.pp(doc_enc, spec, stream=sio)
r = sio.getvalue().split("\n")
assert r[0] == "h Message Header : b'header'"
assert r[1] == "t Message Type : b'0200'"
assert r[2] == "p Bitmap, Primary : b'\\xc0\\x00\\x00\\x00\\x00\\x10\\x00\\x00'"
assert r[3] == "1 Bitmap, Secondary : b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00 '"
assert r[4] == "2 Primary Account Number (PAN) : b'08' b'12345678'"
assert r[5] == "44 Additional Response Data : b'03' b'123'"
assert r[6] == "123 Reserved for Private Use : b'003' b'123'"
assert r[7] == ""
assert len(r) == 8
# fmt: on
def test_pp_optional_fields():
# fmt: off
spec["h"]["max_len"] = 6
spec["h"]["len_type"] = 0
# Empty
doc_dec = {}
sio = StringIO()
iso8583.pp(doc_dec, spec, stream=sio)
r = sio.getvalue().split("\n")
assert r[0] == ""
assert len(r) == 1
# Add header
doc_dec = {}
doc_dec["h"] = "header"
sio = StringIO()
iso8583.pp(doc_dec, spec, stream=sio)
r = sio.getvalue().split("\n")
assert r[0] == "h Message Header : 'header'"
assert r[1] == ""
assert len(r) == 2
# Add header, type
doc_dec = {}
doc_dec["h"] = "header"
doc_dec["t"] = "0200"
sio = StringIO()
iso8583.pp(doc_dec, spec, stream=sio)
r = sio.getvalue().split("\n")
assert r[0] == "h Message Header : 'header'"
assert r[1] == "t Message Type : '0200'"
assert r[2] == ""
assert len(r) == 3
# Add header, type, field 2
doc_dec = {}
doc_dec["h"] = "header"
doc_dec["t"] = "0200"
doc_dec["2"] = "12345678"
sio = StringIO()
iso8583.pp(doc_dec, spec, stream=sio)
r = sio.getvalue().split("\n")
assert r[0] == "h Message Header : 'header'"
assert r[1] == "t Message Type : '0200'"
assert r[2] == "2 Primary Account Number (PAN) : '12345678'"
assert r[3] == ""
assert len(r) == 4
# Add header, type, field 123 + encode
doc_dec = {}
doc_dec["h"] = "header"
doc_dec["t"] = "0200"
doc_dec["123"] = "123"
_, doc_enc = iso8583.encode(doc_dec, spec)
sio = StringIO()
iso8583.pp(doc_dec, spec, stream=sio)
r = sio.getvalue().split("\n")
assert r[0] == "h Message Header : 'header'"
assert r[1] == "t Message Type : '0200'"
assert r[2] == "p Bitmap, Primary : '8000000000000000'"
assert r[3] == "1 Bitmap, Secondary : '0000000000000020'"
assert r[4] == "123 Reserved for Private Use : '123'"
assert r[5] == ""
assert len(r) == 6
sio = StringIO()
iso8583.pp(doc_enc, spec, stream=sio)
r = sio.getvalue().split("\n")
assert r[0] == "h Message Header : b'header'"
assert r[1] == "t Message Type : b'0200'"
assert r[2] == "p Bitmap, Primary : b'\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00'"
assert r[3] == "1 Bitmap, Secondary : b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00 '"
assert r[4] == "123 Reserved for Private Use : b'003' b'123'"
assert r[5] == ""
assert len(r) == 6
# fmt: on
def test_pp_header_present_but_not_in_spec():
# fmt: off
spec["h"]["max_len"] = 0
spec["h"]["len_type"] = 0
# Empty
doc_dec = {}
sio = StringIO()
iso8583.pp(doc_dec, spec, stream=sio)
r = sio.getvalue().split("\n")
assert r[0] == ""
assert len(r) == 1
# Add header
doc_dec = {}
doc_dec["h"] = "header"
sio = StringIO()
iso8583.pp(doc_dec, spec, stream=sio)
r = sio.getvalue().split("\n")
assert r[0] == ""
assert len(r) == 1
# Add header, type
doc_dec = {}
doc_dec["h"] = "header"
doc_dec["t"] = "0200"
sio = StringIO()
iso8583.pp(doc_dec, spec, stream=sio)
r = sio.getvalue().split("\n")
assert r[0] == "t Message Type : '0200'"
assert r[1] == ""
assert len(r) == 2
# fmt: on
def test_pp_no_desc():
# fmt: off
spec["h"]["max_len"] = 0
spec["h"]["len_type"] = 0
doc_dec = {}
doc_dec["t"] = "0200"
doc_dec["2"] = "12345678"
doc_dec["44"] = "123"
doc_dec["123"] = "123"
_, doc_enc = iso8583.encode(doc_dec, spec)
sio = StringIO()
iso8583.pp(doc_dec, spec, 0, stream=sio)
r = sio.getvalue().split("\n")
assert r[0] == "t : '0200'"
assert r[1] == "p : 'C000000000100000'"
assert r[2] == "1 : '0000000000000020'"
assert r[3] == "2 : '12345678'"
assert r[4] == "44 : '123'"
assert r[5] == "123: '123'"
assert r[6] == ""
assert len(r) == 7
sio = StringIO()
iso8583.pp(doc_enc, spec, 0, stream=sio)
r = sio.getvalue().split("\n")
assert r[0] == "t : b'0200'"
assert r[1] == "p : b'\\xc0\\x00\\x00\\x00\\x00\\x10\\x00\\x00'"
assert r[2] == "1 : b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00 '"
assert r[3] == "2 : b'08' b'12345678'"
assert r[4] == "44 : b'03' b'123'"
assert r[5] == "123: b'003' b'123'"
assert r[6] == ""
assert len(r) == 7
# fmt: on
def test_pp_folding():
# fmt: off
spec["h"]["max_len"] = 0
spec["h"]["len_type"] = 0
doc_dec = {}
doc_dec["t"] = "0200"
doc_dec["123"] = "123456789012345678901234567890123456789012345678901234567890"
_, doc_enc = iso8583.encode(doc_dec, spec)
# standard width = 80
sio = StringIO()
iso8583.pp(doc_dec, spec, stream=sio, line_width=80)
r = sio.getvalue().split("\n")
assert r[0] == "t Message Type : '0200'"
assert r[1] == "p Bitmap, Primary : '8000000000000000'"
assert r[2] == "1 Bitmap, Secondary : '0000000000000020'"
assert r[3] == "123 Reserved for Private Use : '123456789012345678901234567890123456789012'"
assert r[4] == " '345678901234567890'"
assert r[5] == ""
assert len(r) == 6
sio = StringIO()
iso8583.pp(doc_enc, spec, stream=sio, line_width=80)
r = sio.getvalue().split("\n")
assert r[0] == "t Message Type : b'0200'"
assert r[1] == "p Bitmap, Primary : b'\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00'"
assert r[2] == "1 Bitmap, Secondary : b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00 '"
assert r[3] == "123 Reserved for Private Use : b'060' b'1234567890123456789012345678901234'"
assert r[4] == " b'56789012345678901234567890'"
assert r[5] == ""
assert len(r) == 6
# reduced width = 80
sio = StringIO()
iso8583.pp(doc_dec, spec, stream=sio, line_width=60)
r = sio.getvalue().split("\n")
assert r[0] == "t Message Type : '0200'"
assert r[1] == "p Bitmap, Primary : '8000000000000000'"
assert r[2] == "1 Bitmap, Secondary : '0000000000000020'"
assert r[3] == "123 Reserved for Private Use : '1234567890123456789012'"
assert r[4] == " '3456789012345678901234'"
assert r[5] == " '5678901234567890'"
assert r[6] == ""
assert len(r) == 7
sio = StringIO()
iso8583.pp(doc_enc, spec, stream=sio, line_width=60)
r = sio.getvalue().split("\n")
assert r[0] == "t Message Type : b'0200'"
assert r[1] == "p Bitmap, Primary : b'\\x80\\x00\\x00\\x00\\x00'"
assert r[2] == " b'\\x00\\x00\\x00'"
assert r[3] == "1 Bitmap, Secondary : b'\\x00\\x00\\x00\\x00\\x00'"
assert r[4] == " b'\\x00\\x00 '"
assert r[5] == "123 Reserved for Private Use : b'060' b'12345678901234'"
assert r[6] == " b'56789012345678'"
assert r[7] == " b'90123456789012'"
assert r[8] == " b'34567890123456'"
assert r[9] == " b'7890'"
assert r[10] == ""
assert len(r) == 11
# even chunks
sio = StringIO()
iso8583.pp(doc_dec, spec, stream=sio, line_width=68)
r = sio.getvalue().split("\n")
assert r[0] == "t Message Type : '0200'"
assert r[1] == "p Bitmap, Primary : '8000000000000000'"
assert r[2] == "1 Bitmap, Secondary : '0000000000000020'"
assert r[3] == "123 Reserved for Private Use : '123456789012345678901234567890'"
assert r[4] == " '123456789012345678901234567890'"
assert r[5] == ""
assert len(r) == 6
sio = StringIO()
iso8583.pp(doc_enc, spec, stream=sio, line_width=61)
r = sio.getvalue().split("\n")
assert r[0] == "t Message Type : b'0200'"
assert r[1] == "p Bitmap, Primary : b'\\x80\\x00\\x00\\x00\\x00'"
assert r[2] == " b'\\x00\\x00\\x00'"
assert r[3] == "1 Bitmap, Secondary : b'\\x00\\x00\\x00\\x00\\x00'"
assert r[4] == " b'\\x00\\x00 '"
assert r[5] == "123 Reserved for Private Use : b'060' b'123456789012345'"
assert r[6] == " b'678901234567890'"
assert r[7] == " b'123456789012345'"
assert r[8] == " b'678901234567890'"
assert r[9] == ""
assert len(r) == 10
# This is a test scenario where "b''" triggers a fold
doc_dec = {}
doc_dec["t"] = "0200"
doc_dec["123"] = "12"
_, doc_enc = iso8583.encode(doc_dec, spec)
sio = StringIO()
iso8583.pp(doc_enc, spec, stream=sio, line_width=44)
r = sio.getvalue().split("\n")
assert r[0] == "t Message Type : b'0200'"
assert r[1] == "p Bitmap, Primary : b'\\x80'"
assert r[2] == " b'\\x00'"
assert r[3] == " b'\\x00'"
assert r[4] == " b'\\x00'"
assert r[5] == " b'\\x00'"
assert r[6] == " b'\\x00'"
assert r[7] == " b'\\x00'"
assert r[8] == " b'\\x00'"
assert r[9] == "1 Bitmap, Secondary : b'\\x00'"
assert r[10] == " b'\\x00'"
assert r[11] == " b'\\x00'"
assert r[12] == " b'\\x00'"
assert r[13] == " b'\\x00'"
assert r[14] == " b'\\x00'"
assert r[15] == " b'\\x00 '"
assert r[16] == "123 Reserved for Private Use : b'002' b'1'"
assert r[17] == " b'2'"
assert r[18] == ""
assert len(r) == 19
# This is a test scenario where "''" triggers a fold
sio = StringIO()
iso8583.pp(doc_dec, spec, stream=sio, line_width=37)
r = sio.getvalue().split("\n")
assert r[0] == "t Message Type : '0'"
assert r[1] == " '2'"
assert r[2] == " '0'"
assert r[3] == " '0'"
assert r[4] == "p Bitmap, Primary : '8'"
assert r[5] == " '0'"
assert r[6] == " '0'"
assert r[7] == " '0'"
assert r[8] == " '0'"
assert r[9] == " '0'"
assert r[10] == " '0'"
assert r[11] == " '0'"
assert r[12] == " '0'"
assert r[13] == " '0'"
assert r[14] == " '0'"
assert r[15] == " '0'"
assert r[16] == " '0'"
assert r[17] == " '0'"
assert r[18] == " '0'"
assert r[19] == " '0'"
assert r[20] == "1 Bitmap, Secondary : '0'"
assert r[21] == " '0'"
assert r[22] == " '0'"
assert r[23] == " '0'"
assert r[24] == " '0'"
assert r[25] == " '0'"
assert r[26] == " '0'"
assert r[27] == " '0'"
assert r[28] == " '0'"
assert r[29] == " '0'"
assert r[30] == " '0'"
assert r[31] == " '0'"
assert r[32] == " '0'"
assert r[33] == " '0'"
assert r[34] == " '2'"
assert r[35] == " '0'"
assert r[36] == "123 Reserved for Private Use : '1'"
assert r[37] == " '2'"
assert r[38] == ""
assert len(r) == 39
# This is a test scenario where _pprint_bytes does not
# try to fold because the data is <= 1
doc_dec = {}
doc_dec["t"] = "0200"
doc_dec["123"] = ""
_, doc_enc = iso8583.encode(doc_dec, spec)
sio = StringIO()
iso8583.pp(doc_enc, spec, stream=sio, line_width=44)
r = sio.getvalue().split("\n")
assert r[0] == "t Message Type : b'0200'"
assert r[1] == "p Bitmap, Primary : b'\\x80'"
assert r[2] == " b'\\x00'"
assert r[3] == " b'\\x00'"
assert r[4] == " b'\\x00'"
assert r[5] == " b'\\x00'"
assert r[6] == " b'\\x00'"
assert r[7] == " b'\\x00'"
assert r[8] == " b'\\x00'"
assert r[9] == "1 Bitmap, Secondary : b'\\x00'"
assert r[10] == " b'\\x00'"
assert r[11] == " b'\\x00'"
assert r[12] == " b'\\x00'"
assert r[13] == " b'\\x00'"
assert r[14] == " b'\\x00'"
assert r[15] == " b'\\x00 '"
assert r[16] == "123 Reserved for Private Use : b'000' b''"
assert r[17] == ""
assert len(r) == 18
# This is a test scenario where _pprint_str does not
# try to fold because the data is <= 1
sio = StringIO()
iso8583.pp(doc_dec, spec, stream=sio, line_width=37)
r = sio.getvalue().split("\n")
assert r[0] == "t Message Type : '0'"
assert r[1] == " '2'"
assert r[2] == " '0'"
assert r[3] == " '0'"
assert r[4] == "p Bitmap, Primary : '8'"
assert r[5] == " '0'"
assert r[6] == " '0'"
assert r[7] == " '0'"
assert r[8] == " '0'"
assert r[9] == " '0'"
assert r[10] == " '0'"
assert r[11] == " '0'"
assert r[12] == " '0'"
assert r[13] == " '0'"
assert r[14] == " '0'"
assert r[15] == " '0'"
assert r[16] == " '0'"
assert r[17] == " '0'"
assert r[18] == " '0'"
assert r[19] == " '0'"
assert r[20] == "1 Bitmap, Secondary : '0'"
assert r[21] == " '0'"
assert r[22] == " '0'"
assert r[23] == " '0'"
assert r[24] == " '0'"
assert r[25] == " '0'"
assert r[26] == " '0'"
assert r[27] == " '0'"
assert r[28] == " '0'"
assert r[29] == " '0'"
assert r[30] == " '0'"
assert r[31] == " '0'"
assert r[32] == " '0'"
assert r[33] == " '0'"
assert r[34] == " '2'"
assert r[35] == " '0'"
assert r[36] == "123 Reserved for Private Use : ''"
assert r[37] == ""
assert len(r) == 38
# This is a test scenario where _pprint_bytes does not
# try to fold because the data is <= 1
doc_dec = {}
doc_dec["t"] = "0200"
doc_dec["123"] = "1"
_, doc_enc = iso8583.encode(doc_dec, spec)
sio = StringIO()
iso8583.pp(doc_enc, spec, stream=sio, line_width=44)
r = sio.getvalue().split("\n")
assert r[0] == "t Message Type : b'0200'"
assert r[1] == "p Bitmap, Primary : b'\\x80'"
assert r[2] == " b'\\x00'"
assert r[3] == " b'\\x00'"
assert r[4] == " b'\\x00'"
assert r[5] == " b'\\x00'"
assert r[6] == " b'\\x00'"
assert r[7] == " b'\\x00'"
assert r[8] == " b'\\x00'"
assert r[9] == "1 Bitmap, Secondary : b'\\x00'"
assert r[10] == " b'\\x00'"
assert r[11] == " b'\\x00'"
assert r[12] == " b'\\x00'"
assert r[13] == " b'\\x00'"
assert r[14] == " b'\\x00'"
assert r[15] == " b'\\x00 '"
assert r[16] == "123 Reserved for Private Use : b'001' b'1'"
assert r[17] == ""
assert len(r) == 18
# This is a test scenario where _pprint_str does not
# try to fold because the data is <= 1
sio = StringIO()
iso8583.pp(doc_dec, spec, stream=sio, line_width=37)
r = sio.getvalue().split("\n")
assert r[0] == "t Message Type : '0'"
assert r[1] == " '2'"
assert r[2] == " '0'"
assert r[3] == " '0'"
assert r[4] == "p Bitmap, Primary : '8'"
assert r[5] == " '0'"
assert r[6] == " '0'"
assert r[7] == " '0'"
assert r[8] == " '0'"
assert r[9] == " '0'"
assert r[10] == " '0'"
assert r[11] == " '0'"
assert r[12] == " '0'"
assert r[13] == " '0'"
assert r[14] == " '0'"
assert r[15] == " '0'"
assert r[16] == " '0'"
assert r[17] == " '0'"
assert r[18] == " '0'"
assert r[19] == " '0'"
assert r[20] == "1 Bitmap, Secondary : '0'"
assert r[21] == " '0'"
assert r[22] == " '0'"
assert r[23] == " '0'"
assert r[24] == " '0'"
assert r[25] == " '0'"
assert r[26] == " '0'"
assert r[27] == " '0'"
assert r[28] == " '0'"
assert r[29] == " '0'"
assert r[30] == " '0'"
assert r[31] == " '0'"
assert r[32] == " '0'"
assert r[33] == " '0'"
assert r[34] == " '2'"
assert r[35] == " '0'"
assert r[36] == "123 Reserved for Private Use : '1'"
assert r[37] == ""
assert len(r) == 38
# Negative line parameters
doc_dec = {}
doc_dec["t"] = "0200"
doc_dec["123"] = "1"
_, doc_enc = iso8583.encode(doc_dec, spec)
sio = StringIO()
iso8583.pp(doc_enc, spec, stream=sio, desc_width=-99, line_width=-99)
r = sio.getvalue().split("\n")
assert r[0] == "t : b'0'"
assert r[1] == " b'2'"
assert r[2] == " b'0'"
assert r[3] == " b'0'"
assert r[4] == "p : b'\\x80'"
assert r[5] == " b'\\x00'"
assert r[6] == " b'\\x00'"
assert r[7] == " b'\\x00'"
assert r[8] == " b'\\x00'"
assert r[9] == " b'\\x00'"
assert r[10] == " b'\\x00'"
assert r[11] == " b'\\x00'"
assert r[12] == "1 : b'\\x00'"
assert r[13] == " b'\\x00'"
assert r[14] == " b'\\x00'"
assert r[15] == " b'\\x00'"
assert r[16] == " b'\\x00'"
assert r[17] == " b'\\x00'"
assert r[18] == " b'\\x00'"
assert r[19] == " b' '"
assert r[20] == "123: b'001' b'1'"
assert r[21] == ""
assert len(r) == 22
sio = StringIO()
iso8583.pp(doc_dec, spec, stream=sio, desc_width=-99, line_width=-99)
r = sio.getvalue().split("\n")
assert r[0] == "t : '0'"
assert r[1] == " '2'"
assert r[2] == " '0'"
assert r[3] == " '0'"
assert r[4] == "p : '8'"
assert r[5] == " '0'"
assert r[6] == " '0'"
assert r[7] == " '0'"
assert r[8] == " '0'"
assert r[9] == " '0'"
assert r[10] == " '0'"
assert r[11] == " '0'"
assert r[12] == " '0'"
assert r[13] == " '0'"
assert r[14] == " '0'"
assert r[15] == " '0'"
assert r[16] == " '0'"
assert r[17] == " '0'"
assert r[18] == " '0'"
assert r[19] == " '0'"
assert r[20] == "1 : '0'"
assert r[21] == " '0'"
assert r[22] == " '0'"
assert r[23] == " '0'"
assert r[24] == " '0'"
assert r[25] == " '0'"
assert r[26] == " '0'"
assert r[27] == " '0'"
assert r[28] == " '0'"
assert r[29] == " '0'"
assert r[30] == " '0'"
assert r[31] == " '0'"
assert r[32] == " '0'"
assert r[33] == " '0'"
assert r[34] == " '2'"
assert r[35] == " '0'"
assert r[36] == "123: '1'"
assert r[37] == ""
assert len(r) == 38
# fmt: on
def test_pp_invalid_types():
"""Invalid types should simply be printed as repr()
If encoded dictionary does not have required 'len' and 'data'
keys then no data should be printed.
"""
# fmt: off
spec["h"]["max_len"] = 0
spec["h"]["len_type"] = 0
doc_dec = {}
doc_dec["t"] = [1, 2, 3, 4]
doc_dec["123"] = set([1, 2, 3])
# standard width = 80
sio = StringIO()
iso8583.pp(doc_dec, spec, stream=sio, line_width=80)
r = sio.getvalue().split("\n")
assert r[0] == "t Message Type : [1, 2, 3, 4]"
assert r[1] == "123 Reserved for Private Use : {1, 2, 3}"
assert r[2] == ""
assert len(r) == 3
# trigger unkown dispatch with reduced width = 1
sio = StringIO()
iso8583.pp(doc_dec, spec, stream=sio, line_width=1)
r = sio.getvalue().split("\n")
assert r[0] == "t Message Type : [1, 2, 3, 4]"
assert r[1] == "123 Reserved for Private Use : {1, 2, 3}"
assert r[2] == ""
assert len(r) == 3
# invalid encoded data
doc_enc = {}
doc_enc["t"] = {}
doc_enc["1"] = {'len': b'len'}
doc_enc["2"] = {'data': b'data'}
doc_enc["3"] = {'spam': b'eggs'}
sio = StringIO()
iso8583.pp(doc_enc, spec, stream=sio, line_width=80)
r = sio.getvalue().split("\n")
assert r[0] == "t Message Type : b''"
assert r[1] == "1 Bitmap, Secondary : b'len' b''"
assert r[2] == "2 Primary Account Number (PAN) : b'data'"
assert r[3] == "3 Processing Code : b''"
assert r[4] == ""
assert len(r) == 5
# fmt: on
def test_pp_no_yield_on_empty_string(capsys):
for _ in _wrap_bytes_repr(b"", 10):
print("spam")
for _ in _wrap_str_repr("", 10):
print("eggs")
captured = capsys.readouterr()
assert captured.out == ""
|
py | 1a3bedbfd3bd47edb1c49d6d4312a54d23c016cf | from __future__ import absolute_import, division, print_function
# DIALS version numbers are constructed from
# 1. a common prefix
__dials_version_format = "DIALS %s"
# 2. the most recent annotated git tag (or failing that: a default string)
__dials_version_default = "2.dev"
# 3. a dash followed by the number of commits since that tag
# 4. a dash followed by a lowercase 'g' and the current commit id
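# Illustrative example: a checkout one commit past tag v1.1.0 at commit 56f9cd7 is
# described by git as "v1.1.0-1-g56f9cd7"; dials_version() below reports that as
# "DIALS 1.1.1-g56f9cd7" (plus "-release" on release branches). Without any git
# metadata the fallback is simply "DIALS 2.dev".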
def get_git_version(dials_path, treat_merges_as_single_commit=False):
import os
import subprocess
version = None
with open(os.devnull, "w") as devnull:
# Obtain name of the current branch. If this fails then the other commands will probably also fail
branch = subprocess.check_output(
["git", "describe", "--contains", "--all", "HEAD"],
cwd=dials_path,
stderr=devnull,
).rstrip()
releasebranch = "dials-2" in branch
# Always treat merges as single commit on release branches
if releasebranch:
treat_merges_as_single_commit = True
# Get descriptive version string, eg. v1.1.0-1-g56f9cd7
if treat_merges_as_single_commit:
try:
# Get a 'correct' depth, which should be the shortest path to the most recent tag
version = subprocess.check_output(
["git", "describe", "--long", "--first-parent"],
cwd=dials_path,
stderr=devnull,
).rstrip()
except Exception:
pass # This is not supported on older git versions < 1.8.4.
if version is None:
# Find the most recent tag
version = subprocess.check_output(
["git", "describe", "--long"], cwd=dials_path, stderr=devnull
).rstrip()
if treat_merges_as_single_commit:
tag = version[: version.rindex("-", 0, version.rindex("-"))]
commit = version[version.rindex("-") + 1 :] # 'gxxxxxxx'
# Now find the first-parent-path
depth = subprocess.check_output(
["git", "rev-list", "%s..HEAD" % tag, "--first-parent"],
cwd=dials_path,
stderr=devnull,
).rstrip()
if depth:
depth = depth.strip().count("\n") + 1
else:
depth = 0
version = "%s-%d-%s" % (tag, depth, commit)
# Turn descriptive version string into proper version number
if version[0] == "v":
version = version[1:].replace(".0-", "-")
version = version.replace("-", ".", 1)
# If we are on a release branch, then append a '-release'-tag
if releasebranch:
version = version + "-release"
return version
# When run from a development installation the version information is extracted
# from the git repository. Otherwise it is read from the file '.gitversion' in the
# DIALS module directory.
def dials_version():
"""Try to obtain the current git revision number
and store a copy in .gitversion"""
version = None
try:
import os
dials_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
version_file = os.path.join(dials_path, ".gitversion")
# 1. Try to access information in .git directory
# Regenerate .gitversion if possible
if not os.environ.get("DIALS_SKIP_GIT_VERSIONING") and os.path.exists(
os.path.join(dials_path, ".git")
):
try:
version = get_git_version(dials_path)
with open(version_file, "w") as gv:
gv.write(version)
except Exception:
if version == "":
version = None
# 2. If .git directory missing or 'git describe' failed, read .gitversion
if (version is None) and os.path.exists(version_file):
with open(version_file, "r") as gv:
version = gv.read().rstrip()
except Exception:
pass
if version is None:
version = __dials_version_format % __dials_version_default
else:
version = __dials_version_format % version
return version
|
py | 1a3bee28dfacdde1390a0b19aaf40e2c92e70441 | from base import api
from .helpers import TestsDatasets
from .helpers import LibraryPopulator
from .helpers import wait_on_state
class LibrariesApiTestCase( api.ApiTestCase, TestsDatasets ):
def setUp( self ):
super( LibrariesApiTestCase, self ).setUp()
self.library_populator = LibraryPopulator( self )
def test_create( self ):
data = dict( name="CreateTestLibrary" )
create_response = self._post( "libraries", data=data, admin=True )
self._assert_status_code_is( create_response, 200 )
library = create_response.json()
self._assert_has_keys( library, "name" )
assert library[ "name" ] == "CreateTestLibrary"
def test_create_private_library_permissions( self ):
library = self.library_populator.new_library( "PermissionTestLibrary" )
library_id = library[ "id" ]
role_id = self.library_populator.user_private_role_id()
self.library_populator.set_permissions( library_id, role_id )
create_response = self._create_folder( library )
self._assert_status_code_is( create_response, 200 )
def test_create_dataset( self ):
library = self.library_populator.new_private_library( "ForCreateDatasets" )
payload, files = self.library_populator.create_dataset_request( library, file_type="txt", contents="create_test" )
create_response = self._post( "libraries/%s/contents" % library[ "id" ], payload, files=files )
self._assert_status_code_is( create_response, 200 )
library_datasets = create_response.json()
assert len( library_datasets ) == 1
library_dataset = library_datasets[ 0 ]
def show():
return self._get( "libraries/%s/contents/%s" % ( library[ "id" ], library_dataset[ "id" ] ) )
wait_on_state( show, assert_ok=True )
library_dataset = show().json()
self._assert_has_keys( library_dataset, "peek", "data_type" )
assert library_dataset[ "peek" ].find("create_test") >= 0
assert library_dataset[ "file_ext" ] == "txt", library_dataset[ "file_ext" ]
def _create_folder( self, library ):
create_data = dict(
folder_id=library[ "root_folder_id" ],
create_type="folder",
name="New Folder",
)
return self._post( "libraries/%s/contents" % library[ "id" ], data=create_data )
|
py | 1a3beefe63f55dcdeeba88e1d179fa315b0b5d1b | #
# This file is part of pyasn1-alt-modules software.
#
# Created by Russ Housley
# Copyright (c) 2019-2021, Vigil Security, LLC
# License: http://vigilsec.com/pyasn1-alt-modules-license.txt
#
import sys
import unittest
from pyasn1.codec.der.decoder import decode as der_decoder
from pyasn1.codec.der.encoder import encode as der_encoder
from pyasn1.type import univ
from pyasn1_alt_modules import pem
from pyasn1_alt_modules import rfc2985
from pyasn1_alt_modules import rfc5280
from pyasn1_alt_modules import rfc5652
from pyasn1_alt_modules import rfc7292
from pyasn1_alt_modules import opentypemap
class PKCS9AttrsTestCase(unittest.TestCase):
pem_text = """\
MYIQjzAOBgNVBEExBwwFQWxpY2UwDwYIKwYBBQUHCQMxAxMBTTAQBgNVBAUxCRMH
QjQ4LTAwNzAQBggrBgEFBQcJBDEEEwJVUzAQBggrBgEFBQcJBTEEEwJVUzARBgoq
hkiG9w0BCRkEMQMCATAwFAYJKoZIhvcNAQkCMQcWBUFsaWNlMBgGCiqGSIb3DQEJ
GQMxCgQIUTeqnHYky4AwHAYJKoZIhvcNAQkPMQ8wDTALBglghkgBZQMEAS0wHQYI
KwYBBQUHCQExERgPMjAxOTA4MDMxMjAwMDBaMB0GCCsGAQUFBwkCMREMD0hlcm5k
b24sIFZBLCBVUzApBgkqhkiG9w0BCRQxHB4aAEYAcgBpAGUAbgBkAGwAeQAgAE4A
YQBtAGUwLwYJKoZIhvcNAQkIMSITIDEyMyBVbmtub3duIFdheSwgTm93aGVyZSwg
VkEsIFVTMIGZBgoqhkiG9w0BCRkCMYGKMIGHMAsGCWCGSAFlAwQBLQR4VsJb7t4l
IqjJCT54rqkbCJsBPE17YQJeEYvyA4M1aDIUU5GnCgEhctgMiDPWGMvaSziixdIg
aU/0zvWvYCm8UwPvBBwMtm9X5NDvk9p4nXbGAT8E/OsV1SYWVvwRJwYak0yWWexM
HSixw1Ljh2nb0fIbqwLOeMmIMIIEsQYKKoZIhvcNAQkZBTGCBKEwggSdBgkqhkiG
9w0BBwKgggSOMIIEigIBATENMAsGCWCGSAFlAwQCAjBRBgkqhkiG9w0BBwGgRARC
Q29udGVudC1UeXBlOiB0ZXh0L3BsYWluDQoNCldhdHNvbiwgY29tZSBoZXJlIC0g
SSB3YW50IHRvIHNlZSB5b3UuoIICfDCCAngwggH+oAMCAQICCQCls1QoG7BuOzAK
BggqhkjOPQQDAzA/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExEDAOBgNVBAcM
B0hlcm5kb24xETAPBgNVBAoMCEJvZ3VzIENBMB4XDTE5MDUyOTE0NDU0MVoXDTIw
MDUyODE0NDU0MVowcDELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlZBMRAwDgYDVQQH
EwdIZXJuZG9uMRAwDgYDVQQKEwdFeGFtcGxlMQ4wDAYDVQQDEwVBbGljZTEgMB4G
CSqGSIb3DQEJARYRYWxpY2VAZXhhbXBsZS5jb20wdjAQBgcqhkjOPQIBBgUrgQQA
IgNiAAT4zZ8HL+xEDpXWkoWp5xFMTz4u4Ae1nF6zXCYlmsEGD5vPu5hl9hDEjd1U
HRgJIPoy3fJcWWeZ8FHCirICtuMgFisNscG/aTwKyDYOFDuqz/C2jyEwqgWCRyxy
ohuJXtmjgZQwgZEwCwYDVR0PBAQDAgeAMEIGCWCGSAGG+EIBDQQ1FjNUaGlzIGNl
cnRpZmljYXRlIGNhbm5vdCBiZSB0cnVzdGVkIGZvciBhbnkgcHVycG9zZS4wHQYD
VR0OBBYEFMS6Wg4+euM8gbD0Aqpouxbglg41MB8GA1UdIwQYMBaAFPI12zQE2qVV
8r1pA5mwYuziFQjBMAoGCCqGSM49BAMDA2gAMGUCMGO5H9E1uAveRGGaf48lN4po
v2yH+hCAc5hOAuZKe/f40MKSF8q4w2ij+0euSaKFiAIxAL3gxp6sMitCmLQgOH6/
RBIC/2syJ97y0KVp9da0PDAvwxLugCHTKZPjjpSLPHHc9TGCAaEwggGdAgEBMEww
PzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMRAwDgYDVQQHDAdIZXJuZG9uMREw
DwYDVQQKDAhCb2d1cyBDQQIJAKWzVCgbsG47MAsGCWCGSAFlAwQCAqCByDAYBgkq
hkiG9w0BCQMxCwYJKoZIhvcNAQcBMBwGCSqGSIb3DQEJBTEPFw0xOTA1MjkxODIz
MTlaMD8GCSqGSIb3DQEJBDEyBDC25CKk/YJnHtT3qsZtRPTosLmNUVhxxlbn8Jo2
+lys4+IKEOba8jebiTfTTPmZJmwwTQYLKoZIhvcNAQkQAgExPjA8BCDHTyEPZCdX
CPUOh5EQs211nQ999bgFAi9zDBVz+ChTo4ABATAVMBOBEWFsaWNlQGV4YW1wbGUu
Y29tMAoGCCqGSM49BAMDBGYwZAIwOLV5WCbYjy5HLHE69IqXQQHVDJQzmo18WwkF
rEYH3EMsvpXEIGqsFTFN6NV4VBe9AjA5fGOCP5IhI32YqmGfs+zDlqZyb2xSX6Gr
/IfCIm0angfOI39g7lAZDyivjh5H/oQwggnoBgtghkgBhvhCAwGBWDGCCdcwggnT
AgEDMIIJjwYJKoZIhvcNAQcBoIIJgASCCXwwggl4MIIGCAYJKoZIhvcNAQcBoIIF
+QSCBfUwggXxMIIF7QYLKoZIhvcNAQwKAQKgggT+MIIE+jAcBgoqhkiG9w0BDAED
MA4ECO6rT/7SnK61AgIH0ASCBNhl7+ZgGmaQO8qy97gTAhXCjVM2/iV3LHWodlbY
iHqpAJj42/Uye/3B7TNROXine1DMI9ZeetIDzYiA52i0sh7PhjBeuCIqFwiRJIv7
bIKYCgz6qSOIAgqr6XdQnpeFp97YqDgST/RGQel7obCNO115+SlelmBxwwSik60p
AwslawMzunvvH9qafrIiTa2myQqpRj/ifxjESJNZxG1O2FiplAi36r3icotim3Sj
zzRJU5+90SqnkogjtxODrQYkv6fqg3qGY/RuwAy+eT3V/z+UUoyL22w1T8qdSFsN
WmMnAFCSGBuoHHoZ22ipItKVg09UzTCWe3CbUmEfjJuJDmw3Oo7sWVYLltxjCS86
XHWAauyFjmMr9aNsDiloGnFKSChslF6Ktj0F6ohOe+iReW5vi16EeEzbQiTjakpr
eQZoeajC/N+XGoT6jKxbk5r1dtnEEJ+Q4wnvSjiGpr6frr4T+4pw301sptOjfO3f
F23rKk7Advvi3k5xZobHcRmzDSfT9X5agtKlc4HCnHTz7XKHstXb1o1DSgTNVWQX
phhFBm10gx6zfEHaLqyMtqXbWe2TuIHMwnBWiLnbhIBn+hbxK4MCfVz3cBZbApks
Au/lXcVnakOJBcCtx/MMfZ3kcnI3Hs6W8rM2ASeDBLIQLVduOc6xlVSoYUQ24NNr
9usfigQkcSTJZPIO52vPyIIQ7zR7U8TiqonkKWU3QJJVarPgLEYMUhBfNHqiGfx/
d1Hf4MBoti8CMFUwsmOTv6d+cHYvQelqeFMXP0DE88gN/mkFBDAzXiXzAqMQcjJ+
pyW6l4o2iQFSvXKSKg/IKved/hGp7RngQohjg4KlbqeGuRYea8Xs4pH5ue5KTeOc
HGNI3Qi/Lmr2rd+e1iuGxwwYZHve6Z+Lxnb20zW9I/2MFm+KsCiB4Z/+x84jR7BG
8l//lpuc2D/vxnKTxaaUAdUXM0Zwze7e+Gc2lMhVG5TJWR1KY51vN5J+apDYc8IR
0L0c2bbkom3WkPq/po/dPDuoaX61nKmztUHaL5r5QZzBBwKVyhdw9J0btnWAFPNK
vzgy5U9iV4+6jXH5TCmlIreszwRPoqqEaYRIfmUpp2+zy91PpzjTs98tx/HIAbOM
fT3WmuTahEnEHehABhwq+S4xwzoVIskLbrcOP6l7UYYR7GTUCjKxh7ru0rSwHrqG
9t33YdzJaFbz+8jb88xtf454Rvur66Cew/4GYX9u1Zef0DF9So1ay3IicpOf5emo
VWIwg4bh7bELi78i/MbdWtNZQcXimykfeTsYH8Q4u+1uxHS5pwEWWwKiUnLQVpZP
2ut255TdgSIhEILwsaLVelRrx/lp14EpY355FOusXiju6g14aWfBnt5udvuTXxDQ
ZHPPNNk+gwzgvvTey98T941hYUctjg0NApJiB66bfrlYB9mkc5ftg5zqhEasYH5C
4ajKKRNMM7zGlwSZvy8PPhnAeE3Q9LTnos0l4ygjQD/kMlvd7XSLW3GUzjyxtkG4
gQh6LGvnafAbgu7GpcapKEppN86sXEePHiQjj92n103+TxMYWwtaO4iAwkjqdEdt
avEHcXRcpdqC0st6nUwPAPAC4LKJbZgLQnNG+wlWIiCMMD56IdfQ7r/zGIr13MxC
kjNNUdISoWWE5GnQMYHbMBMGCSqGSIb3DQEJFTEGBAQBAAAAMFcGCSqGSIb3DQEJ
FDFKHkgAMwBmADcAMQBhAGYANgA1AC0AMQA2ADgANwAtADQANAA0AGEALQA5AGYA
NAA2AC0AYwA4AGIAZQAxADkANABjADMAZQA4AGUwawYJKwYBBAGCNxEBMV4eXABN
AGkAYwByAG8AcwBvAGYAdAAgAEUAbgBoAGEAbgBjAGUAZAAgAEMAcgB5AHAAdABv
AGcAcgBhAHAAaABpAGMAIABQAHIAbwB2AGkAZABlAHIAIAB2ADEALgAwMIIDaAYJ
KoZIhvcNAQcBoIIDWQSCA1UwggNRMIIDTQYLKoZIhvcNAQwKAQOgggMlMIIDIQYK
KoZIhvcNAQkWAaCCAxEEggMNMIIDCTCCAfGgAwIBAgIQNu32hzqhCKdHATXzboyI
ETANBgkqhkiG9w0BAQUFADAUMRIwEAYDVQQDEwlhbm9ueW1vdXMwIBcNMTYwNzE5
MjIwMDAxWhgPMjExNjA2MjUyMjAwMDFaMBQxEjAQBgNVBAMTCWFub255bW91czCC
ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALy2sEJMGNdcDg6BI7mdFM5T
lPzo5sKBzvUnagK5SKBJ11xMPN5toPTBzICB/XTWEB3AwpD0O+srSca+bsUAyedS
5V4BNp8qCyEu5RNRR8qPHheJ/guhLT96/gGI4jlrUyUhFntPkLKODxu+7KanMy6K
dD+PVE8shXRUZTYe4PG64/c7z3wapnf4XoCXkJRzCY5f3MKz3Ul039kVnTlJcikd
C7I9I9RflXLwXVl4nxUbeeRt6Z8WVWS4pCq+14v2aVPvP3mtVmAYHedRkvS04Hrx
4xx98D3NSSw6Z5OLkzqOcFw15fYmH2NLdhh34gSWJmaaCBAbuQ+1rx/42p7MvvsC
AwEAAaNVMFMwFQYDVR0lBA4wDAYKKwYBBAGCNwoDBDAvBgNVHREEKDAmoCQGCisG
AQQBgjcUAgOgFgwUYW5vbnltb3VzQHdpbmRvd3MteAAwCQYDVR0TBAIwADANBgkq
hkiG9w0BAQUFAAOCAQEAuH7iqY0/MLozwFb39ILYAJDHE+HToZBQbHQP4YtienrU
Stk60rIp0WH65lam7m/JhgAcItc/tV1L8mEnLrvvKcA+NeIL8sDOtM28azvgcOi0
P3roeLLLRCuiykUaKmUcZEDm9cDYKIpJf7QetWQ3uuGTk9iRzpH79x2ix35BnyWQ
Rr3INZzmX/+9YRvPBXKYl/89F/w1ORYArpI9XtjfuPWaGQmM4f1WRHE2t3qRyKFF
ri7QiZdpcSx5zvsRHSyjfUMoKs+b6upk+P01lIhg/ewwYngGab+fZhF15pTNN2hx
8PdNGcrGzrkNKCmJKrWCa2xczuMA+z8SCuC1tYTKmDEVMBMGCSqGSIb3DQEJFTEG
BAQBAAAAMDswHzAHBgUrDgMCGgQUpWCP/fZR0TK5BwGuqvTd0+duiKcEFJTubF2k
HktMK+isIjxOTk4yJTOOAgIH0A==
"""
def setUp(self):
self.asn1Spec = rfc2985.AttributeSet()
def testDerCodec(self):
substrate = pem.readBase64fromText(self.pem_text)
asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(der_encoder(asn1Object), substrate)
openTypesMap = {
rfc2985.pkcs_9_at_smimeCapabilities: rfc2985.SMIMECapabilities(),
}
openTypesMap.update(opentypemap.get('certificateAttributesMap'))
openTypesMap.update(opentypemap.get('cmsAttributesMap'))
for attr in asn1Object:
self.assertIn(attr['type'], openTypesMap)
av, rest = der_decoder(
attr['values'][0], asn1Spec=openTypesMap[attr['type']])
self.assertFalse(rest)
self.assertTrue(av.prettyPrint())
self.assertEqual(attr['values'][0], der_encoder(av))
if attr['type'] == rfc2985.pkcs_9_at_userPKCS12:
self.assertEqual(univ.Integer(3), av['version'])
self.assertEqual(rfc5652.id_data, av['authSafe']['contentType'])
outdata, rest = der_decoder(
av['authSafe']['content'], asn1Spec=univ.OctetString())
self.assertFalse(rest)
authsafe, rest = der_decoder(
outdata, asn1Spec=rfc7292.AuthenticatedSafe())
self.assertFalse(rest)
for ci in authsafe:
self.assertEqual(rfc5652.id_data, ci['contentType'])
indata, rest = der_decoder(
ci['content'], asn1Spec=univ.OctetString())
self.assertFalse(rest)
sc, rest = der_decoder(
indata, asn1Spec=rfc7292.SafeContents())
self.assertFalse(rest)
pkcs12BagTypeMap = opentypemap.get('pkcs12BagTypeMap')
for sb in sc:
if sb['bagId'] in pkcs12BagTypeMap:
bv, rest = der_decoder(sb['bagValue'],
asn1Spec=pkcs12BagTypeMap[sb['bagId']])
self.assertFalse(rest)
for bagattr in sb['bagAttributes']:
if bagattr['attrType'] in openTypesMap:
inav, rest = der_decoder(
bagattr['attrValues'][0],
asn1Spec=openTypesMap[bagattr['attrType']])
self.assertFalse(rest)
if bagattr['attrType'] == rfc2985.pkcs_9_at_friendlyName:
self.assertEqual(
"3f71af65-1687-444a-9f46-c8be194c3e8e",
inav)
if bagattr['attrType'] == rfc2985.pkcs_9_at_localKeyId:
self.assertEqual(
univ.OctetString(hexValue='01000000'),
inav)
if attr['type'] == rfc2985.pkcs_9_at_pkcs7PDU:
ci, rest = der_decoder(
attr['values'][0], asn1Spec=rfc5652.ContentInfo())
self.assertFalse(rest)
self.assertEqual(rfc5652.id_signedData, ci['contentType'])
sd, rest = der_decoder(
ci['content'], asn1Spec=rfc5652.SignedData())
self.assertFalse(rest)
self.assertEqual(1, sd['version'])
for si in sd['signerInfos']:
self.assertEqual(1, si['version'])
for siattr in si['signedAttrs']:
if siattr['attrType'] in openTypesMap:
siav, rest = der_decoder(siattr['attrValues'][0],
asn1Spec=openTypesMap[siattr['attrType']])
self.assertFalse(rest)
if siattr['attrType'] == rfc2985.pkcs_9_at_contentType:
self.assertEqual(rfc5652.id_data, siav)
if siattr['attrType'] == rfc2985.pkcs_9_at_messageDigest:
self.assertEqual('b6e422a4', siav.prettyPrint()[2:10])
if siattr['attrType'] == rfc2985.pkcs_9_at_signingTime:
self.assertEqual('190529182319Z', siav['utcTime'])
for choices in sd['certificates']:
for rdn in choices[0]['tbsCertificate']['subject']['rdnSequence']:
if rdn[0]['type'] in openTypesMap:
nv, rest = der_decoder(rdn[0]['value'],
asn1Spec=openTypesMap[rdn[0]['type']])
self.assertFalse(rest)
if rdn[0]['type'] == rfc2985.pkcs_9_at_emailAddress:
self.assertEqual('[email protected]', nv)
def testOpenTypes(self):
openTypesMap = {
rfc2985.pkcs_9_at_smimeCapabilities: rfc2985.SMIMECapabilities(),
}
openTypesMap.update(opentypemap.get('certificateAttributesMap'))
openTypesMap.update(opentypemap.get('cmsAttributesMap'))
substrate = pem.readBase64fromText(self.pem_text)
asn1Object, rest = der_decoder(
substrate, asn1Spec=self.asn1Spec,
openTypes=openTypesMap, decodeOpenTypes=True)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
for attr in asn1Object:
self.assertTrue(attr['type'], openTypesMap)
if attr['type'] == rfc2985.pkcs_9_at_userPKCS12:
self.assertEqual(univ.Integer(3), attr['values'][0]['version'])
self.assertEqual(rfc5652.id_data, attr['values'][0]['authSafe']['contentType'])
authsafe, rest = der_decoder(
attr['values'][0]['authSafe']['content'],
asn1Spec=rfc7292.AuthenticatedSafe())
self.assertFalse(rest)
for ci in authsafe:
self.assertEqual(rfc5652.id_data, ci['contentType'])
indata, rest = der_decoder(ci['content'],
asn1Spec=univ.OctetString())
self.assertFalse(rest)
sc, rest = der_decoder(indata,
asn1Spec=rfc7292.SafeContents(), decodeOpenTypes=True)
self.assertFalse(rest)
pkcs12BagTypeMap = opentypemap.get('pkcs12BagTypeMap')
for sb in sc:
if sb['bagId'] in pkcs12BagTypeMap:
for bagattr in sb['bagAttributes']:
if bagattr['attrType'] in openTypesMap:
if bagattr['attrType'] == rfc2985.pkcs_9_at_friendlyName:
self.assertEqual(
"3f71af65-1687-444a-9f46-c8be194c3e8e",
bagattr['attrValues'][0])
if bagattr['attrType'] == rfc2985.pkcs_9_at_localKeyId:
self.assertEqual(
univ.OctetString(hexValue='01000000'),
bagattr['attrValues'][0])
if attr['type'] == rfc2985.pkcs_9_at_pkcs7PDU:
self.assertEqual(rfc5652.id_signedData, attr['values'][0]['contentType'])
self.assertEqual(1, attr['values'][0]['content']['version'])
for si in attr['values'][0]['content']['signerInfos']:
self.assertEqual(1, si['version'])
for siattr in si['signedAttrs']:
if siattr['attrType'] in openTypesMap:
if siattr['attrType'] == rfc2985.pkcs_9_at_contentType:
self.assertEqual(
rfc5652.id_data, siattr['attrValues'][0])
if siattr['attrType'] == rfc2985.pkcs_9_at_messageDigest:
self.assertEqual('b6e422a4',
siattr['attrValues'][0].prettyPrint()[2:10])
if siattr['attrType'] == rfc2985.pkcs_9_at_signingTime:
self.assertEqual('190529182319Z',
siattr['attrValues'][0]['utcTime'])
for choices in attr['values'][0]['content']['certificates']:
for rdn in choices[0]['tbsCertificate']['subject']['rdnSequence']:
if rdn[0]['type'] in openTypesMap:
if rdn[0]['type'] == rfc2985.pkcs_9_at_emailAddress:
self.assertEqual('[email protected]', rdn[0]['value'])
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite)
|
py | 1a3bef4c26e2e2e58b10727c1bd789001593db30 | """
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from petstore_api.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from petstore_api.exceptions import ApiAttributeError
def lazy_import():
from petstore_api.model.quadrilateral_interface import QuadrilateralInterface
from petstore_api.model.shape_interface import ShapeInterface
globals()['QuadrilateralInterface'] = QuadrilateralInterface
globals()['ShapeInterface'] = ShapeInterface
class SimpleQuadrilateral(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'shape_type': (str,), # noqa: E501
'quadrilateral_type': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'shape_type': 'shapeType', # noqa: E501
'quadrilateral_type': 'quadrilateralType', # noqa: E501
}
read_only_vars = {
}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""SimpleQuadrilateral - a model defined in OpenAPI
Keyword Args:
shape_type (str):
quadrilateral_type (str):
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
composed_info = validate_get_composed_info(
constant_args, kwargs, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
discarded_args = composed_info[3]
for var_name, var_value in kwargs.items():
if var_name in discarded_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""SimpleQuadrilateral - a model defined in OpenAPI
Keyword Args:
shape_type (str):
quadrilateral_type (str):
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
composed_info = validate_get_composed_info(
constant_args, kwargs, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
discarded_args = composed_info[3]
for var_name, var_value in kwargs.items():
if var_name in discarded_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
QuadrilateralInterface,
ShapeInterface,
],
'oneOf': [
],
}
|
py | 1a3bef532700e7f1a10490f3077dcbeeb17269fd | __version__ = '4.0.1'
|
py | 1a3bf06a0f14549de815f61884c3270723fee59c | print ("Enter a sequence of values terminated by zero.")
soma = 0
valor = 1
while valor != 0:
    valor = float(input("Enter a value to be added: "))
soma = soma + valor
print ("A soma dos valores digitados é:", soma)
|
py | 1a3bf1be23b887d550c79a08c1c1f011837845f7 | import pandas as pd
import numpy as np
from typing import Dict, Any, Union, Tuple, AnyStr
from sklearn import datasets, metrics, model_selection
from sklearn.model_selection import train_test_split, cross_val_score, cross_validate
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
import mlflow
import mlflow.sklearn
import hyperopt
from hyperopt.pyll.base import scope
from hyperopt import Trials, hp
from modeler import Modeler
import click
def regression_metrics(actual: pd.Series,
pred: pd.Series) -> Dict:
"""Return a collection of regression metrics as a Series.
Args:
actual: series of actual/true values
pred: series of predicted values
Returns:
Series with the following values in a labeled index:
MAE, RMSE
"""
return {
"ACCURACY": accuracy_score(actual,pred),
"F1": metrics.f1_score(actual,pred)}
def fit_and_log_cv(model,
                   x_train: Union[pd.DataFrame, np.array],
                   y_train: Union[pd.Series, np.array],
                   x_test: Union[pd.DataFrame, np.array],
                   y_test: Union[pd.Series, np.array],
                   params: Dict[str, Any],
                   nested: bool = False) -> Dict[str, Any]:
    """Fit a model and log it along with train/CV metrics.
    Args:
        model: estimator class to instantiate with ``params``
        x_train: feature matrix for training/CV data
        y_train: label array for training/CV data
        x_test: feature matrix for test data
        y_test: label array for test data
        params: hyperparameters passed to the estimator constructor
        nested: if true, mlflow run will be started as child
            of existing parent
    Returns:
        Dict of cross-validation and test metrics.
    """
with mlflow.start_run(nested=nested) as run:
# Fit CV models; extract predictions and metrics
print(type(params))
print(params)
model_cv = model(**params)
y_pred_cv = model_selection.cross_val_predict(model_cv, x_train, y_train)
metrics_cv = {
f"val_{metric}": value
for metric, value in regression_metrics(y_train, y_pred_cv).items()}
# Fit and log full training sample model; extract predictions and metrics
mlflow.sklearn.autolog()
model = model(**params)
model.fit(x_train, y_train)
y_pred_test = model.predict(x_test)
metrics_test = {
f"test_{metric}": value
for metric, value in regression_metrics(y_test, y_pred_test).items()}
metrics = {**metrics_test, **metrics_cv}
mlflow.log_metrics(metrics)
mlflow.sklearn.log_model(model, "model")
return metrics
def build_train_objective(model,
x_train: Union[pd.DataFrame, np.array],
y_train: Union[pd.Series, np.array],
x_test: Union[pd.DataFrame, np.array],
y_test: Union[pd.Series, np.array],
metric: str):
"""Build optimization objective function fits and evaluates model.
Args:
x_train: feature matrix for training/CV data
y_train: label array for training/CV data
x_test: feature matrix for test data
y_test: label array for test data
metric: name of metric to be optimized
Returns:
Optimization function set up to take parameter dict from Hyperopt.
"""
def train_func(params):
"""Train a model and return loss metric."""
metrics = fit_and_log_cv(model,
x_train, y_train, x_test, y_test, params, nested=True)
return {'status': hyperopt.STATUS_OK, 'loss': -metrics[metric]}
return train_func
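# Note: hyperopt minimizes the returned 'loss', so the chosen metric is negated
# inside train_func above to turn metric maximization (e.g. F1) into minimization.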
def log_best(run: mlflow.entities.Run,
metric: str) -> None:
"""Log the best parameters from optimization to the parent experiment.
Args:
run: current run to log metrics
metric: name of metric to select best and log
"""
client = mlflow.tracking.MlflowClient()
runs = client.search_runs(
[run.info.experiment_id],
"tags.mlflow.parentRunId = '{run_id}' ".format(run_id=run.info.run_id))
best_run = min(runs, key=lambda run: -run.data.metrics[metric])
mlflow.set_tag("best_run", best_run.info.run_id)
mlflow.log_metric(f"best_{metric}", best_run.data.metrics[metric])
##############################################################################
@click.command()
@click.option('--name', type=str, default='')
@click.option('--maxeval', type=int, default=10)
@click.option('--metric', type=str, default='val_F1')
def main(name,maxeval,metric):
"""Triggers experiment looping through ML algorithms
Args:
name: name of experiment
maxeval: maximum number of evaluation
metric: name of metric to minimize cost function
"""
mlflow.set_experiment(name)
MAX_EVALS = maxeval
METRIC = metric
space = [{
'max_depth': hp.choice('max_depth', range(1,20)),
'max_features': hp.choice('max_features', range(1,26)),
'n_estimators': hp.choice('n_estimators', range(100,500)),
'criterion': hp.choice('criterion', ["gini", "entropy"])},
{'var_smoothing':hp.uniform('var_smoothing', 0.000000001,0.000001)}]
X_train, X_test, y_train, y_test = Modeler().prepro()
for index, algo in enumerate([RandomForestClassifier,GaussianNB]):
with mlflow.start_run(run_name=str(algo)) as run:
trials = Trials()
train_objective = build_train_objective(algo,X_train, y_train, X_test, y_test, METRIC)
hyperopt.fmin(fn=train_objective,
space=space[index],
algo=hyperopt.tpe.suggest,
max_evals=MAX_EVALS,
trials=trials)
log_best(run, METRIC)
# search_run_id = run.info.run_id
# experiment_id = run.info.experiment_id
mlflow.end_run()
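# Example CLI invocation (illustrative; substitute the actual script name):
#   python train_experiment.py --name my-experiment --maxeval 20 --metric val_F1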
if __name__ == '__main__':
main() |
py | 1a3bf2f83c1080401415bb46c53b583b8c480fb7 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SecurityRuleAssociations(Model):
"""All security rules associated with the network interface.
:param network_interface_association:
:type network_interface_association: :class:`NetworkInterfaceAssociation
<azure.mgmt.network.models.NetworkInterfaceAssociation>`
:param subnet_association:
:type subnet_association: :class:`SubnetAssociation
<azure.mgmt.network.models.SubnetAssociation>`
:param default_security_rules: Collection of default security rules of the
network security group.
:type default_security_rules: list of :class:`SecurityRule
<azure.mgmt.network.models.SecurityRule>`
:param effective_security_rules: Collection of effective security rules.
:type effective_security_rules: list of
:class:`EffectiveNetworkSecurityRule
<azure.mgmt.network.models.EffectiveNetworkSecurityRule>`
"""
_attribute_map = {
'network_interface_association': {'key': 'networkInterfaceAssociation', 'type': 'NetworkInterfaceAssociation'},
'subnet_association': {'key': 'subnetAssociation', 'type': 'SubnetAssociation'},
'default_security_rules': {'key': 'defaultSecurityRules', 'type': '[SecurityRule]'},
'effective_security_rules': {'key': 'effectiveSecurityRules', 'type': '[EffectiveNetworkSecurityRule]'},
}
def __init__(self, network_interface_association=None, subnet_association=None, default_security_rules=None, effective_security_rules=None):
self.network_interface_association = network_interface_association
self.subnet_association = subnet_association
self.default_security_rules = default_security_rules
self.effective_security_rules = effective_security_rules
|
py | 1a3bf3bea32460a809f079372278dfc7c89ce2d6 | import argparse
import math
from urllib.request import urlopen
import sys
import os
import json
import subprocess
import glob
from braceexpand import braceexpand
from types import SimpleNamespace
import os.path
from omegaconf import OmegaConf
import torch
from torch import nn, optim
from torch.nn import functional as F
from torchvision import transforms
from torchvision.transforms import functional as TF
torch.backends.cudnn.benchmark = False # NR: True is a bit faster, but can lead to OOM. False is more deterministic.
#torch.use_deterministic_algorithms(True) # NR: grid_sampler_2d_backward_cuda does not have a deterministic implementation
from torch_optimizer import DiffGrad, AdamP, RAdam
from perlin_numpy import generate_fractal_noise_2d
from CLIP import clip
import kornia
import kornia.augmentation as K
import numpy as np
import imageio
import re
import random
from einops import rearrange
from PIL import ImageFile, Image, PngImagePlugin
ImageFile.LOAD_TRUNCATED_IMAGES = True
# or 'border'
global_padding_mode = 'reflection'
global_aspect_width = 1
global_spot_file = None
from util import map_number, palette_from_string, real_glob
from vqgan import VqganDrawer
class_table = {
"vqgan": VqganDrawer
}
try:
from clipdrawer import ClipDrawer
from pixeldrawer import PixelDrawer
from linedrawer import LineDrawer
# update class_table if these import OK
class_table.update({
"line_sketch": LineDrawer,
"pixel": PixelDrawer,
"clipdraw": ClipDrawer
})
except ImportError:
# diffvg is not strictly required
pass
try:
import matplotlib.colors
except ImportError:
# only needed for palette stuff
pass
# this is enabled when not in the master branch
# print("warning: running unreleased future version")
# https://stackoverflow.com/a/39662359
def isnotebook():
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell':
return True # Jupyter notebook or qtconsole
elif shell == 'Shell':
return True # Seems to be what co-lab does
elif shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
else:
return False # Other type (?)
except NameError:
return False # Probably standard Python interpreter
IS_NOTEBOOK = isnotebook()
if IS_NOTEBOOK:
from IPython import display
from tqdm.notebook import tqdm
from IPython.display import clear_output
else:
from tqdm import tqdm
# Functions and classes
def sinc(x):
return torch.where(x != 0, torch.sin(math.pi * x) / (math.pi * x), x.new_ones([]))
def lanczos(x, a):
cond = torch.logical_and(-a < x, x < a)
out = torch.where(cond, sinc(x) * sinc(x/a), x.new_zeros([]))
return out / out.sum()
def ramp(ratio, width):
n = math.ceil(width / ratio + 1)
out = torch.empty([n])
cur = 0
for i in range(out.shape[0]):
out[i] = cur
cur += ratio
return torch.cat([-out[1:].flip([0]), out])[1:-1]
# NR: Testing with different intital images
def old_random_noise_image(w,h):
random_image = Image.fromarray(np.random.randint(0,255,(w,h,3),dtype=np.dtype('uint8')))
return random_image
def NormalizeData(data):
return (data - np.min(data)) / (np.max(data) - np.min(data))
# https://stats.stackexchange.com/a/289477
def contrast_noise(n):
n = 0.9998 * n + 0.0001
n1 = (n / (1-n))
n2 = np.power(n1, -2)
n3 = 1 / (1 + n2)
return n3
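# Algebraically this is contrast_noise(n) = n^2 / (n^2 + (1 - n)^2): an S-shaped
# contrast curve fixed at 0 and 1 with midpoint 0.5, pushing values toward the
# extremes (the 0.9998*n + 0.0001 rescale just keeps the odds ratio finite).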
def random_noise_image(w,h):
# scale up roughly as power of 2
if (w>1024 or h>1024):
side, octp = 2048, 7
elif (w>512 or h>512):
side, octp = 1024, 6
elif (w>256 or h>256):
side, octp = 512, 5
else:
side, octp = 256, 4
nr = NormalizeData(generate_fractal_noise_2d((side, side), (32, 32), octp))
ng = NormalizeData(generate_fractal_noise_2d((side, side), (32, 32), octp))
nb = NormalizeData(generate_fractal_noise_2d((side, side), (32, 32), octp))
stack = np.dstack((contrast_noise(nr),contrast_noise(ng),contrast_noise(nb)))
    substack = stack[:h, :w, :]
    im = Image.fromarray((255.9 * substack).astype('uint8'))
return im
# testing
def gradient_2d(start, stop, width, height, is_horizontal):
if is_horizontal:
return np.tile(np.linspace(start, stop, width), (height, 1))
else:
return np.tile(np.linspace(start, stop, height), (width, 1)).T
def gradient_3d(width, height, start_list, stop_list, is_horizontal_list):
result = np.zeros((height, width, len(start_list)), dtype=float)
for i, (start, stop, is_horizontal) in enumerate(zip(start_list, stop_list, is_horizontal_list)):
result[:, :, i] = gradient_2d(start, stop, width, height, is_horizontal)
return result
def random_gradient_image(w,h):
array = gradient_3d(w, h, (0, 0, np.random.randint(0,255)), (np.random.randint(1,255), np.random.randint(2,255), np.random.randint(3,128)), (True, False, False))
random_image = Image.fromarray(np.uint8(array))
return random_image
class ReplaceGrad(torch.autograd.Function):
@staticmethod
def forward(ctx, x_forward, x_backward):
ctx.shape = x_backward.shape
return x_forward
@staticmethod
def backward(ctx, grad_in):
return None, grad_in.sum_to_size(ctx.shape)
replace_grad = ReplaceGrad.apply
def spherical_dist_loss(x, y):
x = F.normalize(x, dim=-1)
y = F.normalize(y, dim=-1)
return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
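# For unit vectors ||x - y|| = 2*sin(theta/2), so this evaluates to theta^2 / 2,
# i.e. half the squared angular distance between the two embeddings.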
class Prompt(nn.Module):
def __init__(self, embed, weight=1., stop=float('-inf')):
super().__init__()
self.register_buffer('embed', embed)
self.register_buffer('weight', torch.as_tensor(weight))
self.register_buffer('stop', torch.as_tensor(stop))
def forward(self, input):
input_normed = F.normalize(input.unsqueeze(1), dim=2)
embed_normed = F.normalize(self.embed.unsqueeze(0), dim=2)
dists = input_normed.sub(embed_normed).norm(dim=2).div(2).arcsin().pow(2).mul(2)
dists = dists * self.weight.sign()
return self.weight.abs() * replace_grad(dists, torch.maximum(dists, self.stop)).mean()
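# Note on `stop`: replace_grad keeps the raw distances in the forward pass but routes
# gradients through max(dists, stop), so elements whose (signed) distance has dropped
# below the stop threshold no longer contribute gradient. The default stop of -inf
# disables this clamping.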
def parse_prompt(prompt):
vals = prompt.rsplit(':', 2)
vals = vals + ['', '1', '-inf'][len(vals):]
# print(f"parsed vals is {vals}")
return vals[0], float(vals[1]), float(vals[2])
from typing import cast, Dict, List, Optional, Tuple, Union
# override class to get padding_mode
class MyRandomPerspective(K.RandomPerspective):
def apply_transform(
self, input: torch.Tensor, params: Dict[str, torch.Tensor], transform: Optional[torch.Tensor] = None
) -> torch.Tensor:
_, _, height, width = input.shape
transform = cast(torch.Tensor, transform)
return kornia.geometry.warp_perspective(
input, transform, (height, width),
mode=self.resample.name.lower(), align_corners=self.align_corners, padding_mode=global_padding_mode
)
cached_spot_indexes = {}
def fetch_spot_indexes(sideX, sideY):
global global_spot_file
# make sure image is loaded if we need it
cache_key = (sideX, sideY)
if cache_key not in cached_spot_indexes:
if global_spot_file is not None:
mask_image = Image.open(global_spot_file)
elif global_aspect_width != 1:
mask_image = Image.open("inputs/spot_wide.png")
else:
mask_image = Image.open("inputs/spot_square.png")
# this is a one channel mask
mask_image = mask_image.convert('RGB')
mask_image = mask_image.resize((sideX, sideY), Image.LANCZOS)
mask_image_tensor = TF.to_tensor(mask_image)
# print("ONE CHANNEL ", mask_image_tensor.shape)
mask_indexes = mask_image_tensor.ge(0.5).to(device)
# print("GE ", mask_indexes.shape)
# sys.exit(0)
mask_indexes_off = mask_image_tensor.lt(0.5).to(device)
cached_spot_indexes[cache_key] = [mask_indexes, mask_indexes_off]
return cached_spot_indexes[cache_key]
# n = torch.ones((3,5,5))
# f = generate.fetch_spot_indexes(5, 5)
# f[0].shape = [60,3]
class MakeCutouts(nn.Module):
def __init__(self, cut_size, cutn, cut_pow=1.):
global global_aspect_width
super().__init__()
self.cut_size = cut_size
self.cutn = cutn
self.cutn_zoom = int(2*cutn/3)
self.cut_pow = cut_pow
self.transforms = None
augmentations = []
if global_aspect_width != 1:
augmentations.append(K.RandomCrop(size=(self.cut_size,self.cut_size), p=1.0, cropping_mode="resample", return_transform=True))
augmentations.append(MyRandomPerspective(distortion_scale=0.40, p=0.7, return_transform=True))
augmentations.append(K.RandomResizedCrop(size=(self.cut_size,self.cut_size), scale=(0.1,0.75), ratio=(0.85,1.2), cropping_mode='resample', p=0.7, return_transform=True))
augmentations.append(K.ColorJitter(hue=0.1, saturation=0.1, p=0.8, return_transform=True))
self.augs_zoom = nn.Sequential(*augmentations)
augmentations = []
if global_aspect_width == 1:
n_s = 0.95
n_t = (1-n_s)/2
augmentations.append(K.RandomAffine(degrees=0, translate=(n_t, n_t), scale=(n_s, n_s), p=1.0, return_transform=True))
elif global_aspect_width > 1:
n_s = 1/global_aspect_width
n_t = (1-n_s)/2
augmentations.append(K.RandomAffine(degrees=0, translate=(0, n_t), scale=(0.9*n_s, n_s), p=1.0, return_transform=True))
else:
n_s = global_aspect_width
n_t = (1-n_s)/2
augmentations.append(K.RandomAffine(degrees=0, translate=(n_t, 0), scale=(0.9*n_s, n_s), p=1.0, return_transform=True))
# augmentations.append(K.CenterCrop(size=(self.cut_size,self.cut_size), p=1.0, cropping_mode="resample", return_transform=True))
augmentations.append(K.CenterCrop(size=self.cut_size, cropping_mode='resample', p=1.0, return_transform=True))
augmentations.append(K.RandomPerspective(distortion_scale=0.20, p=0.7, return_transform=True))
augmentations.append(K.ColorJitter(hue=0.1, saturation=0.1, p=0.8, return_transform=True))
self.augs_wide = nn.Sequential(*augmentations)
self.noise_fac = 0.1
# Pooling
self.av_pool = nn.AdaptiveAvgPool2d((self.cut_size, self.cut_size))
self.max_pool = nn.AdaptiveMaxPool2d((self.cut_size, self.cut_size))
def forward(self, input, spot=None):
global global_aspect_width, cur_iteration
sideY, sideX = input.shape[2:4]
max_size = min(sideX, sideY)
min_size = min(sideX, sideY, self.cut_size)
cutouts = []
mask_indexes = None
if spot is not None:
spot_indexes = fetch_spot_indexes(self.cut_size, self.cut_size)
if spot == 0:
mask_indexes = spot_indexes[1]
else:
mask_indexes = spot_indexes[0]
# print("Mask indexes ", mask_indexes)
for _ in range(self.cutn):
# Pooling
cutout = (self.av_pool(input) + self.max_pool(input))/2
if mask_indexes is not None:
cutout[0][mask_indexes] = 0.0 # 0.5
if global_aspect_width != 1:
if global_aspect_width > 1:
cutout = kornia.geometry.transform.rescale(cutout, (1, global_aspect_width))
else:
cutout = kornia.geometry.transform.rescale(cutout, (1/global_aspect_width, 1))
# if cur_iteration % 50 == 0 and _ == 0:
# print(cutout.shape)
# TF.to_pil_image(cutout[0].cpu()).save(f"cutout_im_{cur_iteration:02d}_{spot}.png")
cutouts.append(cutout)
if self.transforms is not None:
# print("Cached transforms available")
batch1 = kornia.geometry.transform.warp_perspective(torch.cat(cutouts[:self.cutn_zoom], dim=0), self.transforms[:self.cutn_zoom],
(self.cut_size, self.cut_size), padding_mode=global_padding_mode)
batch2 = kornia.geometry.transform.warp_perspective(torch.cat(cutouts[self.cutn_zoom:], dim=0), self.transforms[self.cutn_zoom:],
(self.cut_size, self.cut_size), padding_mode='zeros')
batch = torch.cat([batch1, batch2])
# if cur_iteration < 2:
# for j in range(4):
# TF.to_pil_image(batch[j].cpu()).save(f"cached_im_{cur_iteration:02d}_{j:02d}_{spot}.png")
# j_wide = j + self.cutn_zoom
# TF.to_pil_image(batch[j_wide].cpu()).save(f"cached_im_{cur_iteration:02d}_{j_wide:02d}_{spot}.png")
else:
batch1, transforms1 = self.augs_zoom(torch.cat(cutouts[:self.cutn_zoom], dim=0))
batch2, transforms2 = self.augs_wide(torch.cat(cutouts[self.cutn_zoom:], dim=0))
# print(batch1.shape, batch2.shape)
batch = torch.cat([batch1, batch2])
# print(batch.shape)
self.transforms = torch.cat([transforms1, transforms2])
## batch, self.transforms = self.augs(torch.cat(cutouts, dim=0))
# if cur_iteration < 2:
# for j in range(4):
# TF.to_pil_image(batch[j].cpu()).save(f"live_im_{cur_iteration:02d}_{j:02d}_{spot}.png")
# j_wide = j + self.cutn_zoom
# TF.to_pil_image(batch[j_wide].cpu()).save(f"live_im_{cur_iteration:02d}_{j_wide:02d}_{spot}.png")
# print(batch.shape, self.transforms.shape)
if self.noise_fac:
facs = batch.new_empty([self.cutn, 1, 1, 1]).uniform_(0, self.noise_fac)
batch = batch + facs * torch.randn_like(batch)
return batch
def resize_image(image, out_size):
ratio = image.size[0] / image.size[1]
area = min(image.size[0] * image.size[1], out_size[0] * out_size[1])
size = round((area * ratio)**0.5), round((area / ratio)**0.5)
return image.resize(size, Image.LANCZOS)
def rebuild_optimisers(args):
global best_loss, best_iter, best_z, num_loss_drop, max_loss_drops, iter_drop_delay
global drawer
drop_divisor = 10 ** num_loss_drop
new_opts = drawer.get_opts(drop_divisor)
    if new_opts is None:
        # legacy
        dropped_learning_rate = args.learning_rate / drop_divisor
# print(f"Optimizing with {args.optimiser} set to {dropped_learning_rate}")
# Set the optimiser
to_optimize = [ drawer.get_z() ]
if args.optimiser == "Adam":
opt = optim.Adam(to_optimize, lr=dropped_learning_rate) # LR=0.1
elif args.optimiser == "AdamW":
opt = optim.AdamW(to_optimize, lr=dropped_learning_rate) # LR=0.2
elif args.optimiser == "Adagrad":
opt = optim.Adagrad(to_optimize, lr=dropped_learning_rate) # LR=0.5+
elif args.optimiser == "Adamax":
opt = optim.Adamax(to_optimize, lr=dropped_learning_rate) # LR=0.5+?
elif args.optimiser == "DiffGrad":
opt = DiffGrad(to_optimize, lr=dropped_learning_rate) # LR=2+?
elif args.optimiser == "AdamP":
opt = AdamP(to_optimize, lr=dropped_learning_rate) # LR=2+?
elif args.optimiser == "RAdam":
opt = RAdam(to_optimize, lr=dropped_learning_rate) # LR=2+?
new_opts = [opt]
return new_opts
def do_init(args):
global opts, perceptors, normalize, cutoutsTable, cutoutSizeTable
global z_orig, z_targets, z_labels, init_image_tensor, target_image_tensor
global gside_X, gside_Y, overlay_image_rgba
global pmsTable, pmsImageTable, pImages, device, spotPmsTable, spotOffPmsTable
global drawer
# do seed first!
if args.seed is None:
seed = torch.seed()
else:
seed = args.seed
int_seed = int(seed)%(2**30)
print('Using seed:', seed)
torch.manual_seed(seed)
np.random.seed(int_seed)
random.seed(int_seed)
# Do it (init that is)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
drawer = class_table[args.drawer](args)
drawer.load_model(args, device)
num_resolutions = drawer.get_num_resolutions()
# print("-----------> NUMR ", num_resolutions)
jit = True if float(torch.__version__[:3]) < 1.8 else False
f = 2**(num_resolutions - 1)
toksX, toksY = args.size[0] // f, args.size[1] // f
sideX, sideY = toksX * f, toksY * f
# save sideX, sideY in globals (need if using overlay)
gside_X = sideX
gside_Y = sideY
for clip_model in args.clip_models:
perceptor = clip.load(clip_model, jit=jit)[0].eval().requires_grad_(False).to(device)
perceptors[clip_model] = perceptor
cut_size = perceptor.visual.input_resolution
cutoutSizeTable[clip_model] = cut_size
if not cut_size in cutoutsTable:
make_cutouts = MakeCutouts(cut_size, args.num_cuts, cut_pow=args.cut_pow)
cutoutsTable[cut_size] = make_cutouts
init_image_tensor = None
target_image_tensor = None
# Image initialisation
if args.init_image or args.init_noise:
# setup init image wih pil
# first - always start with noise or blank
if args.init_noise == 'pixels':
img = random_noise_image(args.size[0], args.size[1])
elif args.init_noise == 'gradient':
img = random_gradient_image(args.size[0], args.size[1])
elif args.init_noise == 'snow':
img = old_random_noise_image(args.size[0], args.size[1])
else:
img = Image.new(mode="RGB", size=(args.size[0], args.size[1]), color=(255, 255, 255))
starting_image = img.convert('RGB')
starting_image = starting_image.resize((sideX, sideY), Image.LANCZOS)
if args.init_image:
# now we might overlay an init image (init_image also can be recycled as overlay)
if 'http' in args.init_image:
init_image = Image.open(urlopen(args.init_image))
else:
init_image = Image.open(args.init_image)
# this version is needed potentially for the loss function
init_image_rgb = init_image.convert('RGB')
init_image_rgb = init_image_rgb.resize((sideX, sideY), Image.LANCZOS)
init_image_tensor = TF.to_tensor(init_image_rgb)
init_image_tensor = init_image_tensor.to(device).unsqueeze(0)
# this version gets overlaid on the background (noise)
init_image_rgba = init_image.convert('RGBA')
init_image_rgba = init_image_rgba.resize((sideX, sideY), Image.LANCZOS)
top_image = init_image_rgba.copy()
if args.init_image_alpha and args.init_image_alpha >= 0:
top_image.putalpha(args.init_image_alpha)
starting_image.paste(top_image, (0, 0), top_image)
starting_image.save("starting_image.png")
starting_tensor = TF.to_tensor(starting_image)
init_tensor = starting_tensor.to(device).unsqueeze(0) * 2 - 1
drawer.init_from_tensor(init_tensor)
else:
# untested
drawer.rand_init(toksX, toksY)
if args.overlay_every:
if args.overlay_image:
if 'http' in args.overlay_image:
overlay_image = Image.open(urlopen(args.overlay_image))
else:
overlay_image = Image.open(args.overlay_image)
overlay_image_rgba = overlay_image.convert('RGBA')
overlay_image_rgba = overlay_image_rgba.resize((sideX, sideY), Image.LANCZOS)
else:
overlay_image_rgba = init_image_rgba
if args.overlay_alpha:
overlay_image_rgba.putalpha(args.overlay_alpha)
overlay_image_rgba.save('overlay_image.png')
if args.target_images is not None:
z_targets = []
filelist = real_glob(args.target_images)
for target_image in filelist:
target_image = Image.open(target_image)
target_image_rgb = target_image.convert('RGB')
target_image_rgb = target_image_rgb.resize((sideX, sideY), Image.LANCZOS)
target_image_tensor_local = TF.to_tensor(target_image_rgb)
target_image_tensor = target_image_tensor_local.to(device).unsqueeze(0) * 2 - 1
z_target = drawer.get_z_from_tensor(target_image_tensor)
z_targets.append(z_target)
if args.image_labels is not None:
z_labels = []
filelist = real_glob(args.image_labels)
cur_labels = []
for image_label in filelist:
image_label = Image.open(image_label)
image_label_rgb = image_label.convert('RGB')
image_label_rgb = image_label_rgb.resize((sideX, sideY), Image.LANCZOS)
image_label_rgb_tensor = TF.to_tensor(image_label_rgb)
image_label_rgb_tensor = image_label_rgb_tensor.to(device).unsqueeze(0) * 2 - 1
z_label = drawer.get_z_from_tensor(image_label_rgb_tensor)
cur_labels.append(z_label)
image_embeddings = torch.stack(cur_labels)
print("Processing labels: ", image_embeddings.shape)
image_embeddings /= image_embeddings.norm(dim=-1, keepdim=True)
image_embeddings = image_embeddings.mean(dim=0)
image_embeddings /= image_embeddings.norm()
z_labels.append(image_embeddings.unsqueeze(0))
z_orig = drawer.get_z_copy()
pmsTable = {}
pmsImageTable = {}
spotPmsTable = {}
spotOffPmsTable = {}
for clip_model in args.clip_models:
pmsTable[clip_model] = []
pmsImageTable[clip_model] = []
spotPmsTable[clip_model] = []
spotOffPmsTable[clip_model] = []
normalize = transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073],
std=[0.26862954, 0.26130258, 0.27577711])
# CLIP tokenize/encode
# NR: Weights / blending
for prompt in args.prompts:
for clip_model in args.clip_models:
pMs = pmsTable[clip_model]
perceptor = perceptors[clip_model]
txt, weight, stop = parse_prompt(prompt)
embed = perceptor.encode_text(clip.tokenize(txt).to(device)).float()
pMs.append(Prompt(embed, weight, stop).to(device))
for vect_prompt in args.vector_prompts:
f1, weight, stop = parse_prompt(vect_prompt)
        # vect_prompts are by nature tuned to 10% of a normal prompt
weight = 0.1 * weight
if 'http' in f1:
# note: this is currently untested...
infile = urlopen(f1)
elif 'json' in f1:
infile = f1
else:
infile = f"vectors/{f1}.json"
if not os.path.exists(infile):
infile = f"pixray/vectors/{f1}.json"
with open(infile) as f_in:
vect_table = json.load(f_in)
for clip_model in args.clip_models:
pMs = pmsTable[clip_model]
v = np.array(vect_table[clip_model])
embed = torch.FloatTensor(v).to(device).float()
pMs.append(Prompt(embed, weight, stop).to(device))
for prompt in args.spot_prompts:
for clip_model in args.clip_models:
pMs = spotPmsTable[clip_model]
perceptor = perceptors[clip_model]
txt, weight, stop = parse_prompt(prompt)
embed = perceptor.encode_text(clip.tokenize(txt).to(device)).float()
pMs.append(Prompt(embed, weight, stop).to(device))
for prompt in args.spot_prompts_off:
for clip_model in args.clip_models:
pMs = spotOffPmsTable[clip_model]
perceptor = perceptors[clip_model]
txt, weight, stop = parse_prompt(prompt)
embed = perceptor.encode_text(clip.tokenize(txt).to(device)).float()
pMs.append(Prompt(embed, weight, stop).to(device))
for label in args.labels:
for clip_model in args.clip_models:
pMs = pmsTable[clip_model]
perceptor = perceptors[clip_model]
txt, weight, stop = parse_prompt(label)
texts = [template.format(txt) for template in imagenet_templates] #format with class
print(f"Tokenizing all of {texts}")
texts = clip.tokenize(texts).to(device) #tokenize
class_embeddings = perceptor.encode_text(texts) #embed with text encoder
class_embeddings /= class_embeddings.norm(dim=-1, keepdim=True)
class_embedding = class_embeddings.mean(dim=0)
class_embedding /= class_embedding.norm()
pMs.append(Prompt(class_embedding.unsqueeze(0), weight, stop).to(device))
for clip_model in args.clip_models:
pImages = pmsImageTable[clip_model]
for path in args.image_prompts:
img = Image.open(path)
pil_image = img.convert('RGB')
img = resize_image(pil_image, (sideX, sideY))
pImages.append(TF.to_tensor(img).unsqueeze(0).to(device))
for seed, weight in zip(args.noise_prompt_seeds, args.noise_prompt_weights):
gen = torch.Generator().manual_seed(seed)
embed = torch.empty([1, perceptor.visual.output_dim]).normal_(generator=gen)
pMs.append(Prompt(embed, weight).to(device))
opts = rebuild_optimisers(args)
# Output for the user
print('Using device:', device)
print('Optimising using:', args.optimiser)
if args.prompts:
print('Using text prompts:', args.prompts)
if args.spot_prompts:
print('Using spot prompts:', args.spot_prompts)
if args.spot_prompts_off:
print('Using spot off prompts:', args.spot_prompts_off)
if args.image_prompts:
print('Using #image prompts:', len(args.image_prompts))
if args.init_image:
print('Using initial image:', args.init_image)
if args.noise_prompt_weights:
print('Noise prompt weights:', args.noise_prompt_weights)
# dreaded globals (for now)
z_orig = None
z_targets = None
z_labels = None
opts = None
drawer = None
perceptors = {}
normalize = None
cutoutsTable = {}
cutoutSizeTable = {}
init_image_tensor = None
target_image_tensor = None
pmsTable = None
spotPmsTable = None
spotOffPmsTable = None
pmsImageTable = None
gside_X=None
gside_Y=None
overlay_image_rgba=None
device=None
cur_iteration=None
cur_anim_index=None
anim_output_files=[]
anim_cur_zs=[]
anim_next_zs=[]
best_loss = None
best_iter = None
best_z = None
num_loss_drop = 0
max_loss_drops = 2
iter_drop_delay = 20
def make_gif(args, iter):
gif_output = os.path.join(args.animation_dir, "anim.gif")
if os.path.exists(gif_output):
os.remove(gif_output)
cmd = ['ffmpeg', '-framerate', '10', '-pattern_type', 'glob',
'-i', f"{args.animation_dir}/*.png", '-loop', '0', gif_output]
try:
output = subprocess.check_output(cmd)
except subprocess.CalledProcessError as cpe:
output = cpe.output
print("Ignoring non-zero exit: ", output)
return gif_output
# !ffmpeg \
# -framerate 10 -pattern_type glob \
# -i '{animation_output}/*_*.png' \
# -loop 0 {animation_output}/final.gif
@torch.no_grad()
def checkdrop(args, iter, losses):
global best_loss, best_iter, best_z, num_loss_drop, max_loss_drops, iter_drop_delay
global drawer
drop_loss_time = False
loss_sum = sum(losses)
is_new_best = False
num_cycles_not_best = 0
if (loss_sum < best_loss):
is_new_best = True
best_loss = loss_sum
best_iter = iter
best_z = drawer.get_z_copy()
else:
num_cycles_not_best = iter - best_iter
if num_cycles_not_best >= iter_drop_delay:
drop_loss_time = True
return drop_loss_time
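# In short: checkdrop() remembers the best (lowest) summed loss seen so far; once the loss has not
# improved for `iter_drop_delay` iterations it returns True. When auto_stop is enabled, train()
# uses that signal to rebuild the optimisers (a learning-rate drop), at most `max_loss_drops` times.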
@torch.no_grad()
def checkin(args, iter, losses):
global drawer
global best_loss, best_iter, best_z, num_loss_drop, max_loss_drops, iter_drop_delay
num_cycles_not_best = iter - best_iter
if losses is not None:
losses_str = ', '.join(f'{loss.item():2.3g}' for loss in losses)
writestr = f'iter: {iter}, loss: {sum(losses).item():1.3g}, losses: {losses_str}'
else:
writestr = f'iter: {iter}, finished'
if args.animation_dir is not None:
writestr = f'anim: {cur_anim_index}/{len(anim_output_files)} {writestr}'
else:
writestr = f'{writestr} (-{num_cycles_not_best}=>{best_loss:2.4g})'
info = PngImagePlugin.PngInfo()
info.add_text('comment', f'{args.prompts}')
timg = drawer.synth(cur_iteration)
img = TF.to_pil_image(timg[0].cpu())
# img = drawer.to_image()
if cur_anim_index is None:
outfile = args.output
else:
outfile = anim_output_files[cur_anim_index]
img.save(outfile, pnginfo=info)
if cur_anim_index == len(anim_output_files) - 1:
# save gif
gif_output = make_gif(args, iter)
if IS_NOTEBOOK and iter % args.display_every == 0:
clear_output()
display.display(display.Image(open(gif_output,'rb').read()))
if IS_NOTEBOOK and iter % args.display_every == 0:
if cur_anim_index is None or iter == 0:
if args.display_clear:
clear_output()
display.display(display.Image(outfile))
tqdm.write(writestr)
def ascend_txt(args):
global cur_iteration, cur_anim_index, perceptors, normalize, cutoutsTable, cutoutSizeTable
global z_orig, z_targets, z_labels, init_image_tensor, target_image_tensor, drawer
global pmsTable, pmsImageTable, spotPmsTable, spotOffPmsTable, global_padding_mode
out = drawer.synth(cur_iteration)
result = []
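# Alternate the global padding mode every iteration; this is presumably picked up by the
# cutout transforms defined elsewhere to vary the augmentation slightly between steps.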
if (cur_iteration%2 == 0):
global_padding_mode = 'reflection'
else:
global_padding_mode = 'border'
cur_cutouts = {}
cur_spot_cutouts = {}
cur_spot_off_cutouts = {}
for cutoutSize in cutoutsTable:
make_cutouts = cutoutsTable[cutoutSize]
cur_cutouts[cutoutSize] = make_cutouts(out)
if args.spot_prompts:
for cutoutSize in cutoutsTable:
cur_spot_cutouts[cutoutSize] = make_cutouts(out, spot=1)
if args.spot_prompts_off:
for cutoutSize in cutoutsTable:
cur_spot_off_cutouts[cutoutSize] = make_cutouts(out, spot=0)
for clip_model in args.clip_models:
perceptor = perceptors[clip_model]
cutoutSize = cutoutSizeTable[clip_model]
transient_pMs = []
if args.spot_prompts:
iii_s = perceptor.encode_image(normalize( cur_spot_cutouts[cutoutSize] )).float()
spotPms = spotPmsTable[clip_model]
for prompt in spotPms:
result.append(prompt(iii_s))
if args.spot_prompts_off:
iii_so = perceptor.encode_image(normalize( cur_spot_off_cutouts[cutoutSize] )).float()
spotOffPms = spotOffPmsTable[clip_model]
for prompt in spotOffPms:
result.append(prompt(iii_so))
pMs = pmsTable[clip_model]
iii = perceptor.encode_image(normalize( cur_cutouts[cutoutSize] )).float()
for prompt in pMs:
result.append(prompt(iii))
# If there are image prompts we make cutouts for those each time
# so that they line up with the current cutouts from augmentation
make_cutouts = cutoutsTable[cutoutSize]
# if animating select one pImage, otherwise use them all
if cur_anim_index is None:
pImages = pmsImageTable[clip_model]
else:
pImages = [ pmsImageTable[clip_model][cur_anim_index] ]
for timg in pImages:
# note: this caches and reuses the transforms - a bit of a hack but it works
if args.image_prompt_shuffle:
# print("Disabling cached transforms")
make_cutouts.transforms = None
# print("Building throwaway image prompts")
# new way builds throwaway Prompts
batch = make_cutouts(timg)
embed = perceptor.encode_image(normalize(batch)).float()
if args.image_prompt_weight is not None:
transient_pMs.append(Prompt(embed, args.image_prompt_weight).to(device))
else:
transient_pMs.append(Prompt(embed).to(device))
for prompt in transient_pMs:
result.append(prompt(iii))
if args.enforce_palette_annealing and args.target_palette:
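# Palette loss: each cutout pixel is pulled toward its nearest colour in the target palette
# (mean L2 distance), with a weight that ramps up linearly with cur_iteration -- the
# "annealing" controlled by args.enforce_palette_annealing.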
target_palette = torch.FloatTensor(args.target_palette).requires_grad_(False).to(device)
_pixels = cur_cutouts[cutoutSize].permute(0,2,3,1).reshape(-1,3)
palette_dists = torch.cdist(target_palette, _pixels, p=2)
best_guesses = palette_dists.argmin(axis=0)
diffs = _pixels - target_palette[best_guesses]
palette_loss = torch.mean( torch.norm( diffs, 2, dim=1 ) )*cur_cutouts[cutoutSize].shape[0]
result.append( palette_loss*cur_iteration/args.enforce_palette_annealing )
if args.smoothness > 0 and args.smoothness_type:
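# Smoothness loss: a total-variation style penalty on the per-channel image gradients,
# optionally clipped at 0.5 or passed through log(1 + s) depending on args.smoothness_type.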
_pixels = cur_cutouts[cutoutSize].permute(0,2,3,1).reshape(-1,cur_cutouts[cutoutSize].shape[2],3)
gyr, gxr = torch.gradient(_pixels[:,:,0])
gyg, gxg = torch.gradient(_pixels[:,:,1])
gyb, gxb = torch.gradient(_pixels[:,:,2])
sharpness = torch.sqrt(gyr**2 + gxr**2+ gyg**2 + gxg**2 + gyb**2 + gxb**2)
if args.smoothness_type=='clipped':
sharpness = torch.clamp( sharpness, max=0.5 )
elif args.smoothness_type=='log':
sharpness = torch.log( torch.ones_like(sharpness)+sharpness )
sharpness = torch.mean( sharpness )
result.append( sharpness*args.smoothness )
if args.saturation:
# based on the old "percepted colourfulness" heuristic from Hasler and Süsstrunk’s 2003 paper
# https://www.researchgate.net/publication/243135534_Measuring_Colourfulness_in_Natural_Images
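# i.e. colourfulness = sqrt(std_rg^2 + std_yb^2) + 0.3 * sqrt(mean_rg^2 + mean_yb^2),
# with rg = R - G and yb = 0.5*(R + G) - B; the negative sign below rewards saturation.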
_pixels = cur_cutouts[cutoutSize].permute(0,2,3,1).reshape(-1,3)
rg = _pixels[:,0]-_pixels[:,1]
yb = 0.5*(_pixels[:,0]+_pixels[:,1])-_pixels[:,2]
rg_std, rg_mean = torch.std_mean(rg)
yb_std, yb_mean = torch.std_mean(yb)
std_rggb = torch.sqrt(rg_std**2 + yb_std**2)
mean_rggb = torch.sqrt(rg_mean**2 + yb_mean**2)
colorfullness = std_rggb+.3*mean_rggb
result.append( -colorfullness*args.saturation/5.0 )
for cutoutSize in cutoutsTable:
# clear the transform "cache"
make_cutouts = cutoutsTable[cutoutSize]
make_cutouts.transforms = None
# main init_weight uses spherical loss
if args.target_images is not None and args.target_image_weight > 0:
if cur_anim_index is None:
cur_z_targets = z_targets
else:
cur_z_targets = [ z_targets[cur_anim_index] ]
for z_target in cur_z_targets:
f_z = drawer.get_z()
if f_z is not None:
f = f_z.reshape(1,-1)
f2 = z_target.reshape(1,-1)
cur_loss = spherical_dist_loss(f, f2) * args.target_image_weight
result.append(cur_loss)
if args.target_weight_pix:
if target_image_tensor is None:
print("OOPS TIT is 0")
else:
cur_loss = F.l1_loss(out, target_image_tensor) * args.target_weight_pix
result.append(cur_loss)
if args.image_labels is not None:
for z_label in z_labels:
f = drawer.get_z().reshape(1,-1)
f2 = z_label.reshape(1,-1)
cur_loss = spherical_dist_loss(f, f2) * args.image_label_weight
result.append(cur_loss)
# main init_weight uses spherical loss
if args.init_weight:
f = drawer.get_z().reshape(1,-1)
f2 = z_orig.reshape(1,-1)
cur_loss = spherical_dist_loss(f, f2) * args.init_weight
result.append(cur_loss)
# these three init_weight variants offer mse_loss, mse_loss in pixel space, and cos loss
if args.init_weight_dist:
cur_loss = F.mse_loss(drawer.get_z(), z_orig) * args.init_weight_dist / 2
result.append(cur_loss)
if args.init_weight_pix:
if init_image_tensor is None:
print("OOPS IIT is 0")
else:
cur_loss = F.l1_loss(out, init_image_tensor) * args.init_weight_pix / 2
result.append(cur_loss)
if args.init_weight_cos:
f = drawer.get_z().reshape(1,-1)
f2 = z_orig.reshape(1,-1)
y = torch.ones_like(f[0])
cur_loss = F.cosine_embedding_loss(f, f2, y) * args.init_weight_cos
result.append(cur_loss)
if args.make_video:
img = np.array(out.mul(255).clamp(0, 255)[0].cpu().detach().numpy().astype(np.uint8))[:,:,:]
img = np.transpose(img, (1, 2, 0))
imageio.imwrite(f'./steps/frame_{cur_iteration:04d}.png', np.array(img))
return result
def re_average_z(args):
global gside_X, gside_Y
global device, drawer
# old_z = z.clone()
cur_z_image = drawer.to_image()
cur_z_image = cur_z_image.convert('RGB')
if overlay_image_rgba:
# print("applying overlay image")
cur_z_image.paste(overlay_image_rgba, (0, 0), overlay_image_rgba)
cur_z_image.save("overlaid.png")
cur_z_image = cur_z_image.resize((gside_X, gside_Y), Image.LANCZOS)
drawer.reapply_from_tensor(TF.to_tensor(cur_z_image).to(device).unsqueeze(0) * 2 - 1)
# torch.autograd.set_detect_anomaly(True)
def train(args, cur_it):
global drawer, opts
global best_loss, best_iter, best_z, num_loss_drop, max_loss_drops, iter_drop_delay
lossAll = None
if cur_it < args.iterations:
# this is awkward, but train is also in charge of saving, so...
rebuild_opts_when_done = False
for opt in opts:
# opt.zero_grad(set_to_none=True)
opt.zero_grad()
# print("drops at ", args.learning_rate_drops)
# num_batches = args.batches * (num_loss_drop + 1)
num_batches = args.batches
for i in range(num_batches):
lossAll = ascend_txt(args)
if i == 0:
if cur_it in args.learning_rate_drops:
print("Dropping learning rate")
rebuild_opts_when_done = True
else:
did_drop = checkdrop(args, cur_it, lossAll)
if args.auto_stop is True:
rebuild_opts_when_done = did_drop
if i == 0 and cur_it % args.save_every == 0:
checkin(args, cur_it, lossAll)
loss = sum(lossAll)
loss.backward()
for opt in opts:
opt.step()
if args.overlay_every and cur_it != 0 and \
(cur_it % (args.overlay_every + args.overlay_offset)) == 0:
re_average_z(args)
drawer.clip_z()
if cur_it == args.iterations:
# this resetting to best is currently disabled
# drawer.set_z(best_z)
checkin(args, cur_it, lossAll)
return False
if rebuild_opts_when_done:
num_loss_drop = num_loss_drop + 1
# this resetting to best is currently disabled
# drawer.set_z(best_z)
# always checkin (and save) after resetting z
# checkin(args, cur_it, lossAll)
if num_loss_drop > max_loss_drops:
return False
best_iter = cur_it
best_loss = 1e20
opts = rebuild_optimisers(args)
return True
imagenet_templates = [
"itap of a {}.",
"a bad photo of the {}.",
"a origami {}.",
"a photo of the large {}.",
"a {} in a video game.",
"art of the {}.",
"a photo of the small {}.",
]
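# These templates are used for CLIP-style prompt ensembling: each label in args.labels is
# formatted into every template, and the resulting text embeddings are averaged and
# re-normalized (see the args.labels loop above).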
def do_run(args):
global cur_iteration, cur_anim_index
global anim_cur_zs, anim_next_zs, anim_output_files
cur_iteration = 0
if args.animation_dir is not None:
# we already have z_targets. setup some sort of global ring
# we need something like
# copies of all the current z's (they can all start off all as copies)
# a list of all the output filenames
#
if not os.path.exists(args.animation_dir):
os.mkdir(args.animation_dir)
if args.target_images is not None:
filelist = real_glob(args.target_images)
else:
filelist = args.image_prompts
num_anim_frames = len(filelist)
for target_image in filelist:
basename = os.path.basename(target_image)
target_output = os.path.join(args.animation_dir, basename)
anim_output_files.append(target_output)
for i in range(num_anim_frames):
cur_z = drawer.get_z_copy()
anim_cur_zs.append(cur_z)
anim_next_zs.append(None)
step_iteration = 0
with tqdm() as pbar:
while True:
cur_images = []
for i in range(num_anim_frames):
# do merge frames here from cur->next when we are ready to be fancy
cur_anim_index = i
# anim_cur_zs[cur_anim_index] = anim_next_zs[cur_anim_index]
cur_iteration = step_iteration
drawer.set_z(anim_cur_zs[cur_anim_index])
for j in range(args.save_every):
keep_going = train(args, cur_iteration)
cur_iteration += 1
pbar.update()
# anim_next_zs[cur_anim_index] = drawer.get_z_copy()
cur_images.append(drawer.to_image())
step_iteration = step_iteration + args.save_every
if step_iteration >= args.iterations:
break
# compute the next round of cur_zs here from all the next_zs
for i in range(num_anim_frames):
prev_i = (i + num_anim_frames - 1) % num_anim_frames
base_image = cur_images[i].copy()
prev_image = cur_images[prev_i].copy().convert('RGBA')
prev_image.putalpha(args.animation_alpha)
base_image.paste(prev_image, (0, 0), prev_image)
# base_image.save(f"overlaid_{i:02d}.png")
drawer.reapply_from_tensor(TF.to_tensor(base_image).to(device).unsqueeze(0) * 2 - 1)
anim_cur_zs[i] = drawer.get_z_copy()
else:
try:
keep_going = True
with tqdm() as pbar:
while keep_going:
try:
keep_going = train(args, cur_iteration)
if cur_iteration == args.iterations:
break
cur_iteration += 1
pbar.update()
except RuntimeError as e:
print("Oops: runtime error: ", e)
print("Try reducing --num-cuts to save memory")
raise e
except KeyboardInterrupt:
pass
if args.make_video:
do_video(args)
def do_video(args):
global cur_iteration
# Video generation
init_frame = 1 # This is the frame where the video will start
last_frame = cur_iteration # You can change to the number of the last frame you want to generate. It will raise an error if that number of frames does not exist.
min_fps = 10
max_fps = 60
total_frames = last_frame-init_frame
length = 15 # Desired time of the video in seconds
frames = []
tqdm.write('Generating video...')
for i in range(init_frame,last_frame): #
frames.append(Image.open(f'./steps/frame_{i:04d}.png'))
#fps = last_frame/10
fps = np.clip(total_frames/length,min_fps,max_fps)
from subprocess import Popen, PIPE
import re
output_file = re.compile(r'\.png$').sub('.mp4', args.output)
p = Popen(['ffmpeg',
'-y',
'-f', 'image2pipe',
'-vcodec', 'png',
'-r', str(fps),
'-i',
'-',
'-vcodec', 'libx264',
'-r', str(fps),
'-pix_fmt', 'yuv420p',
'-crf', '17',
'-preset', 'veryslow',
'-metadata', f'comment={args.prompts}',
output_file], stdin=PIPE)
for im in tqdm(frames):
im.save(p.stdin, 'PNG')
p.stdin.close()
p.wait()
# this dictionary is used for settings in the notebook
global_pixray_settings = {}
def setup_parser(vq_parser):
# Create the parser
# vq_parser = argparse.ArgumentParser(description='Image generation using VQGAN+CLIP')
# Add the arguments
vq_parser.add_argument("-p", "--prompts", type=str, help="Text prompts", default=[], dest='prompts')
vq_parser.add_argument("-sp", "--spot", type=str, help="Spot Text prompts", default=[], dest='spot_prompts')
vq_parser.add_argument("-spo", "--spot_off", type=str, help="Spot off Text prompts", default=[], dest='spot_prompts_off')
vq_parser.add_argument("-spf", "--spot_file", type=str, help="Custom spot file", default=None, dest='spot_file')
vq_parser.add_argument("-l", "--labels", type=str, help="ImageNet labels", default=[], dest='labels')
vq_parser.add_argument("-vp", "--vector_prompts", type=str, help="Vector prompts", default=[], dest='vector_prompts')
vq_parser.add_argument("-ip", "--image_prompts", type=str, help="Image prompts", default=[], dest='image_prompts')
vq_parser.add_argument("-ipw", "--image_prompt_weight", type=float, help="Weight for image prompt", default=None, dest='image_prompt_weight')
vq_parser.add_argument("-ips", "--image_prompt_shuffle", type=bool, help="Shuffle image prompts", default=False, dest='image_prompt_shuffle')
vq_parser.add_argument("-il", "--image_labels", type=str, help="Image prompts", default=None, dest='image_labels')
vq_parser.add_argument("-ilw", "--image_label_weight", type=float, help="Weight for image prompt", default=1.0, dest='image_label_weight')
vq_parser.add_argument("-i", "--iterations", type=int, help="Number of iterations", default=None, dest='iterations')
vq_parser.add_argument("-se", "--save_every", type=int, help="Save image iterations", default=10, dest='save_every')
vq_parser.add_argument("-de", "--display_every", type=int, help="Display image iterations", default=20, dest='display_every')
vq_parser.add_argument("-dc", "--display_clear", type=bool, help="Clear dispaly when updating", default=False, dest='display_clear')
vq_parser.add_argument("-ove", "--overlay_every", type=int, help="Overlay image iterations", default=None, dest='overlay_every')
vq_parser.add_argument("-ovo", "--overlay_offset", type=int, help="Overlay image iteration offset", default=0, dest='overlay_offset')
vq_parser.add_argument("-ovi", "--overlay_image", type=str, help="Overlay image (if not init)", default=None, dest='overlay_image')
vq_parser.add_argument("-qua", "--quality", type=str, help="draft, normal, best", default="normal", dest='quality')
vq_parser.add_argument("-asp", "--aspect", type=str, help="widescreen, square", default="widescreen", dest='aspect')
vq_parser.add_argument("-ezs", "--ezsize", type=str, help="small, medium, large", default=None, dest='ezsize')
vq_parser.add_argument("-sca", "--scale", type=float, help="scale (instead of ezsize)", default=None, dest='scale')
vq_parser.add_argument("-ova", "--overlay_alpha", type=int, help="Overlay alpha (0-255)", default=None, dest='overlay_alpha')
vq_parser.add_argument("-s", "--size", nargs=2, type=int, help="Image size (width height)", default=None, dest='size')
vq_parser.add_argument("-ii", "--init_image", type=str, help="Initial image", default=None, dest='init_image')
vq_parser.add_argument("-iia", "--init_image_alpha", type=int, help="Init image alpha (0-255)", default=200, dest='init_image_alpha')
vq_parser.add_argument("-in", "--init_noise", type=str, help="Initial noise image (pixels or gradient)", default="pixels", dest='init_noise')
vq_parser.add_argument("-ti", "--target_images", type=str, help="Target images", default=None, dest='target_images')
vq_parser.add_argument("-tiw", "--target_image_weight", type=float, help="Target images weight", default=1.0, dest='target_image_weight')
vq_parser.add_argument("-twp", "--target_weight_pix", type=float, help="Target weight pix loss", default=0., dest='target_weight_pix')
vq_parser.add_argument("-anim", "--animation_dir", type=str, help="Animation output dir", default=None, dest='animation_dir')
vq_parser.add_argument("-ana", "--animation_alpha", type=int, help="Forward blend for consistency", default=128, dest='animation_alpha')
vq_parser.add_argument("-iw", "--init_weight", type=float, help="Initial weight (main=spherical)", default=None, dest='init_weight')
vq_parser.add_argument("-iwd", "--init_weight_dist", type=float, help="Initial weight dist loss", default=0., dest='init_weight_dist')
vq_parser.add_argument("-iwc", "--init_weight_cos", type=float, help="Initial weight cos loss", default=0., dest='init_weight_cos')
vq_parser.add_argument("-iwp", "--init_weight_pix", type=float, help="Initial weight pix loss", default=0., dest='init_weight_pix')
vq_parser.add_argument("-m", "--clip_models", type=str, help="CLIP model", default=None, dest='clip_models')
vq_parser.add_argument("-nps", "--noise_prompt_seeds", nargs="*", type=int, help="Noise prompt seeds", default=[], dest='noise_prompt_seeds')
vq_parser.add_argument("-npw", "--noise_prompt_weights", nargs="*", type=float, help="Noise prompt weights", default=[], dest='noise_prompt_weights')
vq_parser.add_argument("-lr", "--learning_rate", type=float, help="Learning rate", default=0.2, dest='learning_rate')
vq_parser.add_argument("-lrd", "--learning_rate_drops", nargs="*", type=float, help="When to drop learning rate (relative to iterations)", default=[75], dest='learning_rate_drops')
vq_parser.add_argument("-as", "--auto_stop", type=bool, help="Auto stopping", default=False, dest='auto_stop')
vq_parser.add_argument("-cuts", "--num_cuts", type=int, help="Number of cuts", default=None, dest='num_cuts')
vq_parser.add_argument("-bats", "--batches", type=int, help="How many batches of cuts", default=1, dest='batches')
vq_parser.add_argument("-cutp", "--cut_power", type=float, help="Cut power", default=1., dest='cut_pow')
vq_parser.add_argument("-sd", "--seed", type=int, help="Seed", default=None, dest='seed')
vq_parser.add_argument("-opt", "--optimiser", type=str, help="Optimiser (Adam, AdamW, Adagrad, Adamax, DiffGrad, AdamP or RAdam)", default='Adam', dest='optimiser')
vq_parser.add_argument("-o", "--output", type=str, help="Output file", default="output.png", dest='output')
vq_parser.add_argument("-vid", "--video", type=bool, help="Create video frames?", default=False, dest='make_video')
vq_parser.add_argument("-d", "--deterministic", type=bool, help="Enable cudnn.deterministic?", default=False, dest='cudnn_determinism')
vq_parser.add_argument("-mo", "--do_mono", type=bool, help="Monochromatic", default=False, dest='do_mono')
vq_parser.add_argument("-epw", "--enforce_palette_annealing", type=int, help="enforce palette annealing, 0 -- skip", default=5000, dest='enforce_palette_annealing')
vq_parser.add_argument("-tp", "--target_palette", type=str, help="target palette", default=None, dest='target_palette')
vq_parser.add_argument("-tpl", "--target_palette_length", type=int, help="target palette length", default=16, dest='target_palette_length')
vq_parser.add_argument("-smo", "--smoothness", type=float, help="encourage smoothness, 0 -- skip", default=0, dest='smoothness')
vq_parser.add_argument("-est", "--smoothness_type", type=str, help="enforce smoothness type: default/clipped/log", default='default', dest='smoothness_type')
vq_parser.add_argument("-sat", "--saturation", type=float, help="encourage saturation, 0 -- skip", default=0, dest='saturation')
return vq_parser
square_size = [144, 144]
widescreen_size = [200, 112] # at the small size this becomes 192,112
def process_args(vq_parser, namespace=None):
global global_aspect_width
global cur_iteration, cur_anim_index, anim_output_files, anim_cur_zs, anim_next_zs
global global_spot_file
global best_loss, best_iter, best_z, num_loss_drop, max_loss_drops, iter_drop_delay
if namespace is None:
# command line: use ARGV to get args
args = vq_parser.parse_args()
elif isnotebook():
args = vq_parser.parse_args(args=[], namespace=namespace)
else:
# sometimes there are both settings and cmd line
args = vq_parser.parse_args(namespace=namespace)
if args.cudnn_determinism:
torch.backends.cudnn.deterministic = True
quality_to_clip_models_table = {
'draft': 'ViT-B/32',
'normal': 'ViT-B/32,ViT-B/16',
'better': 'RN50,ViT-B/32,ViT-B/16',
'best': 'RN50x4,ViT-B/32,ViT-B/16'
}
quality_to_iterations_table = {
'draft': 200,
'normal': 300,
'better': 400,
'best': 500
}
quality_to_scale_table = {
'draft': 1,
'normal': 2,
'better': 3,
'best': 4
}
# this should be replaced with logic that does something
# smart based on available memory (eg: size, num_models, etc)
quality_to_num_cuts_table = {
'draft': 40,
'normal': 40,
'better': 40,
'best': 40
}
if args.quality not in quality_to_clip_models_table:
print("Qualitfy setting not understood, aborting -> ", args.quality)
exit(1)
if args.clip_models is None:
args.clip_models = quality_to_clip_models_table[args.quality]
if args.iterations is None:
args.iterations = quality_to_iterations_table[args.quality]
if args.num_cuts is None:
args.num_cuts = quality_to_num_cuts_table[args.quality]
if args.ezsize is None and args.scale is None:
args.scale = quality_to_scale_table[args.quality]
size_to_scale_table = {
'small': 1,
'medium': 2,
'large': 4
}
aspect_to_size_table = {
'square': [150, 150],
'widescreen': [200, 112]
}
if args.size is not None:
global_aspect_width = args.size[0] / args.size[1]
elif args.aspect == "widescreen":
global_aspect_width = 16/9
else:
global_aspect_width = 1
# determine size if not set
if args.size is None:
size_scale = args.scale
if size_scale is None:
if args.ezsize in size_to_scale_table:
size_scale = size_to_scale_table[args.ezsize]
else:
print("EZ Size not understood, aborting -> ", args.ezsize)
exit(1)
if args.aspect in aspect_to_size_table:
base_size = aspect_to_size_table[args.aspect]
base_width = int(size_scale * base_size[0])
base_height = int(size_scale * base_size[1])
args.size = [base_width, base_height]
else:
print("aspect not understood, aborting -> ", args.aspect)
exit(1)
if args.init_noise.lower() == "none":
args.init_noise = None
# Split text prompts using the pipe character
if args.prompts:
args.prompts = [phrase.strip() for phrase in args.prompts.split("|")]
# Split text prompts using the pipe character
if args.spot_prompts:
args.spot_prompts = [phrase.strip() for phrase in args.spot_prompts.split("|")]
# Split text prompts using the pipe character
if args.spot_prompts_off:
args.spot_prompts_off = [phrase.strip() for phrase in args.spot_prompts_off.split("|")]
# Split text labels using the pipe character
if args.labels:
args.labels = [phrase.strip() for phrase in args.labels.split("|")]
# Split target images using the pipe character
if args.image_prompts:
args.image_prompts = real_glob(args.image_prompts)
# Split text prompts using the pipe character
if args.vector_prompts:
args.vector_prompts = [phrase.strip() for phrase in args.vector_prompts.split("|")]
if args.target_palette is not None:
args.target_palette = palette_from_string(args.target_palette)
if args.overlay_every is not None and args.overlay_every <= 0:
args.overlay_every = None
clip_models = args.clip_models.split(",")
args.clip_models = [model.strip() for model in clip_models]
# Make video steps directory
if args.make_video:
if not os.path.exists('steps'):
os.mkdir('steps')
if args.learning_rate_drops is None:
args.learning_rate_drops = []
else:
args.learning_rate_drops = [int(map_number(n, 0, 100, 0, args.iterations-1)) for n in args.learning_rate_drops]
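# (learning_rate_drops above are given as percentages of the run, 0-100, and are mapped
# to absolute iteration indices via map_number)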
# reset global animation variables
cur_iteration=0
best_iter = cur_iteration
best_loss = 1e20
num_loss_drop = 0
max_loss_drops = len(args.learning_rate_drops)
iter_drop_delay = 12
best_z = None
cur_anim_index=None
anim_output_files=[]
anim_cur_zs=[]
anim_next_zs=[]
global_spot_file = args.spot_file
return args
def reset_settings():
global global_pixray_settings
global_pixray_settings = {}
def add_settings(**kwargs):
global global_pixray_settings
for k, v in kwargs.items():
if v is None:
# just remove the key if it is there
global_pixray_settings.pop(k, None)
else:
global_pixray_settings[k] = v
def get_settings():
global global_pixray_settings
return global_pixray_settings.copy()
def apply_settings():
global global_pixray_settings
settingsDict = None
# first pass - just get the drawer
# Create the parser
vq_parser = argparse.ArgumentParser(description='Image generation using VQGAN+CLIP')
vq_parser.add_argument("--drawer", type=str, help="clipdraw, pixeldraw, etc", default="vqgan", dest='drawer')
settingsDict = SimpleNamespace(**global_pixray_settings)
settings_core, unknown = vq_parser.parse_known_args(namespace=settingsDict)
vq_parser = setup_parser(vq_parser)
class_table[settings_core.drawer].add_settings(vq_parser)
if len(global_pixray_settings) > 0:
# check for any bogus entries in the settings
dests = [d.dest for d in vq_parser._actions]
for k in global_pixray_settings:
if not k in dests:
raise ValueError(f"Requested setting not found, aborting: {k}={global_pixray_settings[k]}")
# convert dictionary to easyDict
# which can be used as an argparse namespace instead
# settingsDict = easydict.EasyDict(global_pixray_settings)
settingsDict = SimpleNamespace(**global_pixray_settings)
settings = process_args(vq_parser, settingsDict)
return settings
def command_line_override():
global global_pixray_settings
# setup_parser expects an existing ArgumentParser, so create one first
vq_parser = setup_parser(argparse.ArgumentParser(description='Image generation using VQGAN+CLIP'))
settings = process_args(vq_parser)
return settings
def main():
settings = apply_settings()
do_init(settings)
do_run(settings)
if __name__ == '__main__':
main() |
py | 1a3bf3df288a418b4ba206ffd0c25ccfdf9fcbb6 | import subprocess
import typer
from typer.testing import CliRunner
from docs_src.first_steps import tutorial004 as mod
runner = CliRunner()
app = typer.Typer()
app.command()(mod.main)
def test_help():
result = runner.invoke(app, ["--help"])
assert result.exit_code == 0
assert "Arguments:" in result.output
assert "NAME [required]" in result.output
assert "LASTNAME [required]" in result.output
assert "--formal / --no-formal" in result.output
def test_1():
result = runner.invoke(app, ["Camila", "Gutiérrez"])
assert result.exit_code == 0
assert "Hello Camila Gutiérrez" in result.output
def test_formal_1():
result = runner.invoke(app, ["Camila", "Gutiérrez", "--formal"])
assert result.exit_code == 0
assert "Good day Ms. Camila Gutiérrez." in result.output
def test_formal_2():
result = runner.invoke(app, ["Camila", "--formal", "Gutiérrez"])
assert result.exit_code == 0
assert "Good day Ms. Camila Gutiérrez." in result.output
def test_formal_3():
result = runner.invoke(app, ["--formal", "Camila", "Gutiérrez"])
assert result.exit_code == 0
assert "Good day Ms. Camila Gutiérrez." in result.output
def test_script():
result = subprocess.run(
["coverage", "run", mod.__file__, "--help"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
)
assert "Usage" in result.stdout
|
py | 1a3bf3eec242f0dbbf8a22123b39d928f1459704 | # coding: utf-8
"""
Dyspatch API
# Introduction The Dyspatch API is based on the REST paradigm, and features resource based URLs with standard HTTP response codes to indicate errors. We use standard HTTP authentication and request verbs, and all responses are JSON formatted. See our [Implementation Guide](https://docs.dyspatch.io/development/implementing_dyspatch/) for more details on how to implement Dyspatch. ## API Client Libraries Dyspatch provides API Clients for popular languages and web frameworks. - [Java](https://github.com/getdyspatch/dyspatch-java) - [Javascript](https://github.com/getdyspatch/dyspatch-javascript) - [Python](https://github.com/getdyspatch/dyspatch-python) - [C#](https://github.com/getdyspatch/dyspatch-dotnet) - [Go](https://github.com/getdyspatch/dyspatch-golang) - [Ruby](https://github.com/getdyspatch/dyspatch-ruby) # noqa: E501
The version of the OpenAPI document: 2020.11
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from dyspatch_client.configuration import Configuration
class TemplateRead(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'str',
'name': 'str',
'description': 'str',
'url': 'str',
'compiled': 'CompiledRead',
'created_at': 'datetime',
'updated_at': 'datetime',
'localizations': 'list[LocalizationMetaRead]'
}
attribute_map = {
'id': 'id',
'name': 'name',
'description': 'description',
'url': 'url',
'compiled': 'compiled',
'created_at': 'createdAt',
'updated_at': 'updatedAt',
'localizations': 'localizations'
}
def __init__(self, id=None, name=None, description=None, url=None, compiled=None, created_at=None, updated_at=None, localizations=None, local_vars_configuration=None): # noqa: E501
"""TemplateRead - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._id = None
self._name = None
self._description = None
self._url = None
self._compiled = None
self._created_at = None
self._updated_at = None
self._localizations = None
self.discriminator = None
if id is not None:
self.id = id
if name is not None:
self.name = name
if description is not None:
self.description = description
if url is not None:
self.url = url
if compiled is not None:
self.compiled = compiled
if created_at is not None:
self.created_at = created_at
if updated_at is not None:
self.updated_at = updated_at
if localizations is not None:
self.localizations = localizations
@property
def id(self):
"""Gets the id of this TemplateRead. # noqa: E501
An opaque, unique identifier for a template # noqa: E501
:return: The id of this TemplateRead. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this TemplateRead.
An opaque, unique identifier for a template # noqa: E501
:param id: The id of this TemplateRead. # noqa: E501
:type: str
"""
self._id = id
@property
def name(self):
"""Gets the name of this TemplateRead. # noqa: E501
The name of a template # noqa: E501
:return: The name of this TemplateRead. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this TemplateRead.
The name of a template # noqa: E501
:param name: The name of this TemplateRead. # noqa: E501
:type: str
"""
self._name = name
@property
def description(self):
"""Gets the description of this TemplateRead. # noqa: E501
A description of the template # noqa: E501
:return: The description of this TemplateRead. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this TemplateRead.
A description of the template # noqa: E501
:param description: The description of this TemplateRead. # noqa: E501
:type: str
"""
self._description = description
@property
def url(self):
"""Gets the url of this TemplateRead. # noqa: E501
The API url for a specific template # noqa: E501
:return: The url of this TemplateRead. # noqa: E501
:rtype: str
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this TemplateRead.
The API url for a specific template # noqa: E501
:param url: The url of this TemplateRead. # noqa: E501
:type: str
"""
self._url = url
@property
def compiled(self):
"""Gets the compiled of this TemplateRead. # noqa: E501
:return: The compiled of this TemplateRead. # noqa: E501
:rtype: CompiledRead
"""
return self._compiled
@compiled.setter
def compiled(self, compiled):
"""Sets the compiled of this TemplateRead.
:param compiled: The compiled of this TemplateRead. # noqa: E501
:type: CompiledRead
"""
self._compiled = compiled
@property
def created_at(self):
"""Gets the created_at of this TemplateRead. # noqa: E501
The time of initial creation # noqa: E501
:return: The created_at of this TemplateRead. # noqa: E501
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this TemplateRead.
The time of initial creation # noqa: E501
:param created_at: The created_at of this TemplateRead. # noqa: E501
:type: datetime
"""
self._created_at = created_at
@property
def updated_at(self):
"""Gets the updated_at of this TemplateRead. # noqa: E501
The time of last update # noqa: E501
:return: The updated_at of this TemplateRead. # noqa: E501
:rtype: datetime
"""
return self._updated_at
@updated_at.setter
def updated_at(self, updated_at):
"""Sets the updated_at of this TemplateRead.
The time of last update # noqa: E501
:param updated_at: The updated_at of this TemplateRead. # noqa: E501
:type: datetime
"""
self._updated_at = updated_at
@property
def localizations(self):
"""Gets the localizations of this TemplateRead. # noqa: E501
A list of the Template's available localizations # noqa: E501
:return: The localizations of this TemplateRead. # noqa: E501
:rtype: list[LocalizationMetaRead]
"""
return self._localizations
@localizations.setter
def localizations(self, localizations):
"""Sets the localizations of this TemplateRead.
A list of the Template's available localizations # noqa: E501
:param localizations: The localizations of this TemplateRead. # noqa: E501
:type: list[LocalizationMetaRead]
"""
self._localizations = localizations
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TemplateRead):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, TemplateRead):
return True
return self.to_dict() != other.to_dict()
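# Minimal usage sketch (illustrative values only; these field values are hypothetical):
#   template = TemplateRead(id="tem_123", name="Welcome", description="Welcome email")
#   template.to_dict()  # -> {'id': 'tem_123', 'name': 'Welcome', 'description': 'Welcome email', ...}
#   print(template)     # pretty-printed via to_str()/__repr__()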
|
py | 1a3bf43de48d8f43da50dd4ade2f553a7e2b2daa | import sys
from typing import ( # type: ignore
TYPE_CHECKING,
AbstractSet,
Any,
ClassVar,
Dict,
Generator,
List,
Mapping,
NewType,
Optional,
Sequence,
Set,
Tuple,
Type,
Union,
_eval_type,
cast,
get_type_hints,
)
from typing_extensions import Annotated, Literal
try:
from typing import _TypingBase as typing_base # type: ignore
except ImportError:
from typing import _Final as typing_base # type: ignore
try:
from typing import GenericAlias # type: ignore
except ImportError:
# python < 3.9 does not have GenericAlias (list[int], tuple[str, ...] and so on)
GenericAlias = ()
if sys.version_info < (3, 7):
if TYPE_CHECKING:
class ForwardRef:
def __init__(self, arg: Any):
pass
def _eval_type(self, globalns: Any, localns: Any) -> Any:
pass
else:
from typing import _ForwardRef as ForwardRef
else:
from typing import ForwardRef
if sys.version_info < (3, 7):
def evaluate_forwardref(type_: ForwardRef, globalns: Any, localns: Any) -> Any:
return type_._eval_type(globalns, localns)
elif sys.version_info < (3, 9):
def evaluate_forwardref(type_: ForwardRef, globalns: Any, localns: Any) -> Any:
return type_._evaluate(globalns, localns)
else:
def evaluate_forwardref(type_: ForwardRef, globalns: Any, localns: Any) -> Any:
# Even though it is the right signature for python 3.9, mypy complains with
# `error: Too many arguments for "_evaluate" of "ForwardRef"` hence the cast...
return cast(Any, type_)._evaluate(globalns, localns, set())
if sys.version_info < (3, 9):
# Ensure we always get all the whole `Annotated` hint, not just the annotated type.
# For 3.6 to 3.8, `get_type_hints` doesn't recognize `typing_extensions.Annotated`,
# so it already returns the full annotation
get_all_type_hints = get_type_hints
else:
def get_all_type_hints(obj: Any, globalns: Any = None, localns: Any = None) -> Any:
return get_type_hints(obj, globalns, localns, include_extras=True)
if sys.version_info < (3, 7):
from typing import Callable as Callable
AnyCallable = Callable[..., Any]
NoArgAnyCallable = Callable[[], Any]
else:
from collections.abc import Callable as Callable
from typing import Callable as TypingCallable
AnyCallable = TypingCallable[..., Any]
NoArgAnyCallable = TypingCallable[[], Any]
# Annotated[...] is implemented by returning an instance of one of these classes, depending on
# python/typing_extensions version.
AnnotatedTypeNames = {'AnnotatedMeta', '_AnnotatedAlias'}
if sys.version_info < (3, 8):
def get_origin(t: Type[Any]) -> Optional[Type[Any]]:
if type(t).__name__ in AnnotatedTypeNames:
return cast(Type[Any], Annotated) # mypy complains about _SpecialForm in py3.6
return getattr(t, '__origin__', None)
else:
from typing import get_origin as _typing_get_origin
def get_origin(tp: Type[Any]) -> Type[Any]:
"""
We can't directly use `typing.get_origin` since we need a fallback to support
custom generic classes like `ConstrainedList`
It should be useless once https://github.com/cython/cython/issues/3537 is
solved and https://github.com/samuelcolvin/pydantic/pull/1753 is merged.
"""
if type(tp).__name__ in AnnotatedTypeNames:
return cast(Type[Any], Annotated) # mypy complains about _SpecialForm
return _typing_get_origin(tp) or getattr(tp, '__origin__', None)
if sys.version_info < (3, 7): # noqa: C901 (ignore complexity)
def get_args(t: Type[Any]) -> Tuple[Any, ...]:
"""Simplest get_args compatibility layer possible.
The Python 3.6 typing module does not have `_GenericAlias` so
this won't work for everything. In particular this will not
support the `generics` module (we don't support generic models in
python 3.6).
"""
if type(t).__name__ in AnnotatedTypeNames:
return t.__args__ + t.__metadata__
return getattr(t, '__args__', ())
elif sys.version_info < (3, 8): # noqa: C901
from typing import _GenericAlias
def get_args(t: Type[Any]) -> Tuple[Any, ...]:
"""Compatibility version of get_args for python 3.7.
Mostly compatible with the python 3.8 `typing` module version
and able to handle almost all use cases.
"""
if type(t).__name__ in AnnotatedTypeNames:
return t.__args__ + t.__metadata__
if isinstance(t, _GenericAlias):
res = t.__args__
if t.__origin__ is Callable and res and res[0] is not Ellipsis:
res = (list(res[:-1]), res[-1])
return res
return getattr(t, '__args__', ())
else:
from typing import get_args as _typing_get_args
def _generic_get_args(tp: Type[Any]) -> Tuple[Any, ...]:
"""
In python 3.9, `typing.Dict`, `typing.List`, ...
do have an empty `__args__` by default (instead of the generic ~T for example).
In order to still support `Dict` for example and consider it as `Dict[Any, Any]`,
we retrieve the `_nparams` value that tells us how many parameters it needs.
"""
if hasattr(tp, '_nparams'):
return (Any,) * tp._nparams
return ()
def get_args(tp: Type[Any]) -> Tuple[Any, ...]:
"""Get type arguments with all substitutions performed.
For unions, basic simplifications used by Union constructor are performed.
Examples::
get_args(Dict[str, int]) == (str, int)
get_args(int) == ()
get_args(Union[int, Union[T, int], str][int]) == (int, str)
get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
get_args(Callable[[], T][int]) == ([], int)
"""
if type(tp).__name__ in AnnotatedTypeNames:
return tp.__args__ + tp.__metadata__
# the fallback is needed for the same reasons as `get_origin` (see above)
return _typing_get_args(tp) or getattr(tp, '__args__', ()) or _generic_get_args(tp)
if TYPE_CHECKING:
from .fields import ModelField
TupleGenerator = Generator[Tuple[str, Any], None, None]
DictStrAny = Dict[str, Any]
DictAny = Dict[Any, Any]
SetStr = Set[str]
ListStr = List[str]
IntStr = Union[int, str]
AbstractSetIntStr = AbstractSet[IntStr]
DictIntStrAny = Dict[IntStr, Any]
MappingIntStrAny = Mapping[IntStr, Any]
CallableGenerator = Generator[AnyCallable, None, None]
ReprArgs = Sequence[Tuple[Optional[str], Any]]
__all__ = (
'ForwardRef',
'Callable',
'AnyCallable',
'NoArgAnyCallable',
'NoneType',
'NONE_TYPES',
'display_as_type',
'resolve_annotations',
'is_callable_type',
'is_literal_type',
'all_literal_values',
'is_namedtuple',
'is_typeddict',
'is_new_type',
'new_type_supertype',
'is_classvar',
'update_field_forward_refs',
'TupleGenerator',
'DictStrAny',
'DictAny',
'SetStr',
'ListStr',
'IntStr',
'AbstractSetIntStr',
'DictIntStrAny',
'CallableGenerator',
'ReprArgs',
'CallableGenerator',
'GenericAlias',
'get_args',
'get_origin',
'typing_base',
'get_all_type_hints',
)
NoneType = None.__class__
NONE_TYPES: Set[Any] = {None, NoneType, Literal[None]}
def display_as_type(v: Type[Any]) -> str:
if not isinstance(v, typing_base) and not isinstance(v, GenericAlias) and not isinstance(v, type):
v = v.__class__
if isinstance(v, GenericAlias):
# Generic alias are constructs like `list[int]`
return str(v).replace('typing.', '')
try:
return v.__name__
except AttributeError:
# happens with typing objects
return str(v).replace('typing.', '')
def resolve_annotations(raw_annotations: Dict[str, Type[Any]], module_name: Optional[str]) -> Dict[str, Type[Any]]:
"""
Partially taken from typing.get_type_hints.
Resolve string or ForwardRef annotations into type objects if possible.
"""
base_globals: Optional[Dict[str, Any]] = None
if module_name:
try:
module = sys.modules[module_name]
except KeyError:
# happens occasionally, see https://github.com/samuelcolvin/pydantic/issues/2363
pass
else:
base_globals = module.__dict__
annotations = {}
for name, value in raw_annotations.items():
if isinstance(value, str):
if sys.version_info >= (3, 7):
value = ForwardRef(value, is_argument=False)
else:
value = ForwardRef(value)
try:
value = _eval_type(value, base_globals, None)
except NameError:
# this is ok, it can be fixed with update_forward_refs
pass
annotations[name] = value
return annotations
def is_callable_type(type_: Type[Any]) -> bool:
return type_ is Callable or get_origin(type_) is Callable
if sys.version_info >= (3, 7):
def is_literal_type(type_: Type[Any]) -> bool:
return Literal is not None and get_origin(type_) is Literal
def literal_values(type_: Type[Any]) -> Tuple[Any, ...]:
return get_args(type_)
else:
def is_literal_type(type_: Type[Any]) -> bool:
return Literal is not None and hasattr(type_, '__values__') and type_ == Literal[type_.__values__]
def literal_values(type_: Type[Any]) -> Tuple[Any, ...]:
return type_.__values__
def all_literal_values(type_: Type[Any]) -> Tuple[Any, ...]:
"""
This method is used to retrieve all Literal values as
Literal can be used recursively (see https://www.python.org/dev/peps/pep-0586)
e.g. `Literal[Literal[Literal[1, 2, 3], "foo"], 5, None]`
"""
if not is_literal_type(type_):
return (type_,)
values = literal_values(type_)
return tuple(x for value in values for x in all_literal_values(value))
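# For example (illustration only):
#   all_literal_values(Literal[Literal[Literal[1, 2, 3], "foo"], 5, None]) == (1, 2, 3, 'foo', 5, None)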
def is_namedtuple(type_: Type[Any]) -> bool:
"""
Check if a given class is a named tuple.
It can be either a `typing.NamedTuple` or `collections.namedtuple`
"""
from .utils import lenient_issubclass
return lenient_issubclass(type_, tuple) and hasattr(type_, '_fields')
def is_typeddict(type_: Type[Any]) -> bool:
"""
Check if a given class is a typed dict (from `typing` or `typing_extensions`)
In 3.10, there will be a public method (https://docs.python.org/3.10/library/typing.html#typing.is_typeddict)
"""
from .utils import lenient_issubclass
return lenient_issubclass(type_, dict) and hasattr(type_, '__total__')
test_type = NewType('test_type', str)
def is_new_type(type_: Type[Any]) -> bool:
"""
Check whether type_ was created using typing.NewType
"""
return isinstance(type_, test_type.__class__) and hasattr(type_, '__supertype__') # type: ignore
def new_type_supertype(type_: Type[Any]) -> Type[Any]:
while hasattr(type_, '__supertype__'):
type_ = type_.__supertype__
return type_
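# For example (illustration only): given UserId = NewType('UserId', int),
# is_new_type(UserId) is True and new_type_supertype(UserId) is int.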
def _check_classvar(v: Optional[Type[Any]]) -> bool:
if v is None:
return False
return v.__class__ == ClassVar.__class__ and (sys.version_info < (3, 7) or getattr(v, '_name', None) == 'ClassVar')
def is_classvar(ann_type: Type[Any]) -> bool:
return _check_classvar(ann_type) or _check_classvar(get_origin(ann_type))
def update_field_forward_refs(field: 'ModelField', globalns: Any, localns: Any) -> None:
"""
Try to update ForwardRefs on fields based on this ModelField, globalns and localns.
"""
if field.type_.__class__ == ForwardRef:
field.type_ = evaluate_forwardref(field.type_, globalns, localns or None)
field.prepare()
if field.sub_fields:
for sub_f in field.sub_fields:
update_field_forward_refs(sub_f, globalns=globalns, localns=localns)
def get_class(type_: Type[Any]) -> Union[None, bool, Type[Any]]:
"""
Tries to get the class of a Type[T] annotation. Returns True if Type is used
without brackets. Otherwise returns None.
"""
try:
origin = get_origin(type_)
if origin is None: # Python 3.6
origin = type_
if issubclass(origin, Type): # type: ignore
if not get_args(type_) or not isinstance(get_args(type_)[0], type):
return True
return get_args(type_)[0]
except (AttributeError, TypeError):
pass
return None
|
py | 1a3bf460cf3e3648234e6ce2426822c3cd1d5608 | import math
import collections
class NaiveBayes:
classes = ['spam', 'ham']
# Word Lists
spam_list = []
ham_list = []
spam_file_count = 0
ham_file_count = 0
def __init__(self, spam_list, ham_list, spam_file_count, ham_file_count):
self.spam_list = spam_list
self.ham_list = ham_list
self.spam_file_count = spam_file_count
self.ham_file_count = ham_file_count
self.vocabulary = set(spam_list).union(set(ham_list))
self.spam_counter = collections.Counter(spam_list)
self.ham_counter = collections.Counter(ham_list)
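# Minimal usage sketch (hypothetical word lists, not from the original data):
#   spam_words = ['win', 'money', 'free', 'money']
#   ham_words = ['meeting', 'lunch', 'project', 'report']
#   nb = NaiveBayes(spam_words, ham_words, spam_file_count=2, ham_file_count=2)
#   nb.classify(['free', 'money'])     # -> 'spam'
#   nb.classify(['project', 'lunch'])  # -> 'ham'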
def classify(self, test_file_word_list):
'''
Classify a test file's word list as 'spam' or 'ham' using a Laplace-smoothed multinomial Naive Bayes rule:
log P(class | test_file) is proportional to log P(class) + log P(word1|class) + ... + log P(wordn|class)
log P(class) = log( len(class word list) / (len(spam list) + len(ham list)) )
log P(word i | class) = log( ((count of word i in the class list) + 1) / (len(class word list) + len(vocabulary)) )
The denominator (len(class word list) + len(vocabulary)) is identical for every word term of a given class, so it is computed once per class below.
'''
log_prob_spam = math.log(len(self.spam_list)/(len(self.spam_list)+len(self.ham_list)))
denominator = len(self.spam_list) + len(self.vocabulary)
for word in test_file_word_list:
numerator = self.spam_counter.get(word.lower(), 0) + 1
log_prob_spam += math.log(numerator/denominator)
log_prob_ham = math.log(len(self.ham_list)/(len(self.spam_list)+len(self.ham_list)))
denominator = len(self.ham_list) + len(self.vocabulary)
for word in test_file_word_list:
numerator = self.ham_counter.get(word.lower(), 0) + 1
log_prob_ham += math.log(numerator/denominator)
if log_prob_spam > log_prob_ham:
return self.classes[0]
else:
return self.classes[1] |
py | 1a3bf5f8efaf9a64a440a296ab8c07aeaef68756 | import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bootcamp.settings")
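# Note: DJANGO_SETTINGS_MODULE was already hard-set to 'settings' above, so this
# setdefault call is effectively a no-op.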
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
|
py | 1a3bf6a1257a1934a4a809f19c83268bbf821608 | import torch
from torch import nn
import torch.nn.functional as F
"""
Differences from the original V-Net:
an nn.Tanh is added after the output convolution so that the first output head (out_tanh) lies in [-1, 1].
"""
class ConvBlock(nn.Module):
def __init__(self, n_stages, n_filters_in, n_filters_out, normalization='none'):
super(ConvBlock, self).__init__()
ops = []
for i in range(n_stages):
if i==0:
input_channel = n_filters_in
else:
input_channel = n_filters_out
ops.append(nn.Conv3d(input_channel, n_filters_out, 3, padding=1))
if normalization == 'batchnorm':
ops.append(nn.BatchNorm3d(n_filters_out))
elif normalization == 'groupnorm':
ops.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
elif normalization == 'instancenorm':
ops.append(nn.InstanceNorm3d(n_filters_out))
elif normalization != 'none':
assert False
ops.append(nn.ReLU(inplace=True))
self.conv = nn.Sequential(*ops)
def forward(self, x):
x = self.conv(x)
return x
class ResidualConvBlock(nn.Module):
def __init__(self, n_stages, n_filters_in, n_filters_out, normalization='none'):
super(ResidualConvBlock, self).__init__()
ops = []
for i in range(n_stages):
if i == 0:
input_channel = n_filters_in
else:
input_channel = n_filters_out
ops.append(nn.Conv3d(input_channel, n_filters_out, 3, padding=1))
if normalization == 'batchnorm':
ops.append(nn.BatchNorm3d(n_filters_out))
elif normalization == 'groupnorm':
ops.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
elif normalization == 'instancenorm':
ops.append(nn.InstanceNorm3d(n_filters_out))
elif normalization != 'none':
assert False
if i != n_stages-1:
ops.append(nn.ReLU(inplace=True))
self.conv = nn.Sequential(*ops)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = (self.conv(x) + x)
x = self.relu(x)
return x
class DownsamplingConvBlock(nn.Module):
def __init__(self, n_filters_in, n_filters_out, stride=2, normalization='none'):
super(DownsamplingConvBlock, self).__init__()
ops = []
if normalization != 'none':
ops.append(nn.Conv3d(n_filters_in, n_filters_out, stride, padding=0, stride=stride))
if normalization == 'batchnorm':
ops.append(nn.BatchNorm3d(n_filters_out))
elif normalization == 'groupnorm':
ops.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
elif normalization == 'instancenorm':
ops.append(nn.InstanceNorm3d(n_filters_out))
else:
assert False
else:
ops.append(nn.Conv3d(n_filters_in, n_filters_out, stride, padding=0, stride=stride))
ops.append(nn.ReLU(inplace=True))
self.conv = nn.Sequential(*ops)
def forward(self, x):
x = self.conv(x)
return x
class UpsamplingDeconvBlock(nn.Module):
def __init__(self, n_filters_in, n_filters_out, stride=2, normalization='none'):
super(UpsamplingDeconvBlock, self).__init__()
ops = []
if normalization != 'none':
ops.append(nn.ConvTranspose3d(n_filters_in, n_filters_out, stride, padding=0, stride=stride))
if normalization == 'batchnorm':
ops.append(nn.BatchNorm3d(n_filters_out))
elif normalization == 'groupnorm':
ops.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
elif normalization == 'instancenorm':
ops.append(nn.InstanceNorm3d(n_filters_out))
else:
assert False
else:
ops.append(nn.ConvTranspose3d(n_filters_in, n_filters_out, stride, padding=0, stride=stride))
ops.append(nn.ReLU(inplace=True))
self.conv = nn.Sequential(*ops)
def forward(self, x):
x = self.conv(x)
return x
class Upsampling(nn.Module):
def __init__(self, n_filters_in, n_filters_out, stride=2, normalization='none'):
super(Upsampling, self).__init__()
ops = []
ops.append(nn.Upsample(scale_factor=stride, mode='trilinear', align_corners=False))
ops.append(nn.Conv3d(n_filters_in, n_filters_out, kernel_size=3, padding=1))
if normalization == 'batchnorm':
ops.append(nn.BatchNorm3d(n_filters_out))
elif normalization == 'groupnorm':
ops.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
elif normalization == 'instancenorm':
ops.append(nn.InstanceNorm3d(n_filters_out))
elif normalization != 'none':
assert False
ops.append(nn.ReLU(inplace=True))
self.conv = nn.Sequential(*ops)
def forward(self, x):
x = self.conv(x)
return x
class VNet(nn.Module):
def __init__(self, n_channels=3, n_classes=2, n_filters=16, normalization='none', has_dropout=False, has_residual=False):
super(VNet, self).__init__()
self.has_dropout = has_dropout
convBlock = ConvBlock if not has_residual else ResidualConvBlock
self.block_one = convBlock(1, n_channels, n_filters, normalization=normalization)
self.block_one_dw = DownsamplingConvBlock(n_filters, 2 * n_filters, normalization=normalization)
self.block_two = convBlock(2, n_filters * 2, n_filters * 2, normalization=normalization)
self.block_two_dw = DownsamplingConvBlock(n_filters * 2, n_filters * 4, normalization=normalization)
self.block_three = convBlock(3, n_filters * 4, n_filters * 4, normalization=normalization)
self.block_three_dw = DownsamplingConvBlock(n_filters * 4, n_filters * 8, normalization=normalization)
self.block_four = convBlock(3, n_filters * 8, n_filters * 8, normalization=normalization)
self.block_four_dw = DownsamplingConvBlock(n_filters * 8, n_filters * 16, normalization=normalization)
self.block_five = convBlock(3, n_filters * 16, n_filters * 16, normalization=normalization)
self.block_five_up = UpsamplingDeconvBlock(n_filters * 16, n_filters * 8, normalization=normalization)
self.block_six = convBlock(3, n_filters * 8, n_filters * 8, normalization=normalization)
self.block_six_up = UpsamplingDeconvBlock(n_filters * 8, n_filters * 4, normalization=normalization)
self.block_seven = convBlock(3, n_filters * 4, n_filters * 4, normalization=normalization)
self.block_seven_up = UpsamplingDeconvBlock(n_filters * 4, n_filters * 2, normalization=normalization)
self.block_eight = convBlock(2, n_filters * 2, n_filters * 2, normalization=normalization)
self.block_eight_up = UpsamplingDeconvBlock(n_filters * 2, n_filters, normalization=normalization)
self.block_nine = convBlock(1, n_filters, n_filters, normalization=normalization)
self.out_conv = nn.Conv3d(n_filters, n_classes, 1, padding=0)
self.out_conv2 = nn.Conv3d(n_filters, n_classes, 1, padding=0)
self.tanh = nn.Tanh()
self.dropout = nn.Dropout3d(p=0.5, inplace=False)
# self.__init_weight()
def encoder(self, input):
x1 = self.block_one(input)
x1_dw = self.block_one_dw(x1)
x2 = self.block_two(x1_dw)
x2_dw = self.block_two_dw(x2)
x3 = self.block_three(x2_dw)
x3_dw = self.block_three_dw(x3)
x4 = self.block_four(x3_dw)
x4_dw = self.block_four_dw(x4)
x5 = self.block_five(x4_dw)
# x5 = F.dropout3d(x5, p=0.5, training=True)
if self.has_dropout:
x5 = self.dropout(x5)
res = [x1, x2, x3, x4, x5]
return res
def decoder(self, features):
x1 = features[0]
x2 = features[1]
x3 = features[2]
x4 = features[3]
x5 = features[4]
x5_up = self.block_five_up(x5)
x5_up = x5_up + x4
x6 = self.block_six(x5_up)
x6_up = self.block_six_up(x6)
x6_up = x6_up + x3
x7 = self.block_seven(x6_up)
x7_up = self.block_seven_up(x7)
x7_up = x7_up + x2
x8 = self.block_eight(x7_up)
x8_up = self.block_eight_up(x8)
x8_up = x8_up + x1
x9 = self.block_nine(x8_up)
# x9 = F.dropout3d(x9, p=0.5, training=True)
if self.has_dropout:
x9 = self.dropout(x9)
out = self.out_conv(x9)
out_tanh = self.tanh(out)
out_seg = self.out_conv2(x9)
return out_tanh, out_seg
def forward(self, input, turnoff_drop=False):
if turnoff_drop:
has_dropout = self.has_dropout
self.has_dropout = False
features = self.encoder(input)
out_tanh, out_seg = self.decoder(features)
if turnoff_drop:
self.has_dropout = has_dropout
return out_tanh, out_seg
# def __init_weight(self):
# for m in self.modules():
# if isinstance(m, nn.Conv3d):
# torch.nn.init.kaiming_normal_(m.weight)
# elif isinstance(m, nn.BatchNorm3d):
# m.weight.data.fill_(1)
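# Usage sketch (not part of the original file; the input shape and settings
# below are illustrative assumptions): run a dummy volume through the
# two-headed network and inspect both outputs.
def _vnet_forward_example():
    net = VNet(n_channels=1, n_classes=2, normalization='batchnorm', has_dropout=True)
    volume = torch.randn(2, 1, 112, 112, 80)  # (batch, channels, D, H, W)
    # turnoff_drop=True temporarily disables dropout for an eval-style pass
    out_tanh, out_seg = net(volume, turnoff_drop=True)
    # out_tanh is the Tanh-squashed head, out_seg holds raw per-class logits
    return out_tanh.shape, out_seg.shape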
if __name__ == '__main__':
# compute FLOPS & PARAMETERS
from thop import profile
from thop import clever_format
model = VNet(n_channels=1, n_classes=2)
input = torch.randn(4, 1, 112, 112, 80)
flops, params = profile(model, inputs=(input,))
macs, params = clever_format([flops, params], "%.3f")
print(macs, params)
print("VNet have {} paramerters in total".format(sum(x.numel() for x in model.parameters())))
# import ipdb; ipdb.set_trace() |
py | 1a3bf841665b4d8d96fb146dd2a3d9c6537f53a5 | """Support for IKEA Tradfri covers."""
import logging
from pytradfri.error import PytradfriError
from homeassistant.components.cover import (
CoverDevice,
ATTR_POSITION,
SUPPORT_OPEN,
SUPPORT_CLOSE,
SUPPORT_SET_POSITION,
)
from homeassistant.core import callback
from .const import DOMAIN, KEY_GATEWAY, KEY_API, CONF_GATEWAY_ID
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Load Tradfri covers based on a config entry."""
gateway_id = config_entry.data[CONF_GATEWAY_ID]
api = hass.data[KEY_API][config_entry.entry_id]
gateway = hass.data[KEY_GATEWAY][config_entry.entry_id]
devices_commands = await api(gateway.get_devices())
devices = await api(devices_commands)
covers = [dev for dev in devices if dev.has_blind_control]
if covers:
async_add_entities(TradfriCover(cover, api, gateway_id) for cover in covers)
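# Note (descriptive comment, inferred from the conversions in TradfriCover
# below, not from upstream docs): Home Assistant treats position 100 as fully
# open while the Tradfri blind control treats 0 as fully open, so positions
# are mirrored with `100 - value` in both directions. A hypothetical helper:
def _invert_position(value):
    """Map between the Home Assistant and Tradfri 0-100 position scales."""
    return 100 - value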
class TradfriCover(CoverDevice):
"""The platform class required by Home Assistant."""
def __init__(self, cover, api, gateway_id):
"""Initialize a cover."""
self._api = api
self._unique_id = f"{gateway_id}-{cover.id}"
self._cover = None
self._cover_control = None
self._cover_data = None
self._name = None
self._available = True
self._gateway_id = gateway_id
self._refresh(cover)
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_SET_POSITION
@property
def unique_id(self):
"""Return unique ID for cover."""
return self._unique_id
@property
def device_info(self):
"""Return the device info."""
info = self._cover.device_info
return {
"identifiers": {(DOMAIN, self._cover.id)},
"name": self._name,
"manufacturer": info.manufacturer,
"model": info.model_number,
"sw_version": info.firmware_version,
"via_device": (DOMAIN, self._gateway_id),
}
async def async_added_to_hass(self):
"""Start thread when added to hass."""
self._async_start_observe()
@property
def available(self):
"""Return True if entity is available."""
return self._available
@property
def should_poll(self):
"""No polling needed for tradfri cover."""
return False
@property
def name(self):
"""Return the display name of this cover."""
return self._name
@property
def current_cover_position(self):
"""Return current position of cover.
None is unknown, 0 is closed, 100 is fully open.
"""
return 100 - self._cover_data.current_cover_position
async def async_set_cover_position(self, **kwargs):
"""Move the cover to a specific position."""
await self._api(self._cover_control.set_state(100 - kwargs[ATTR_POSITION]))
async def async_open_cover(self, **kwargs):
"""Open the cover."""
await self._api(self._cover_control.set_state(0))
async def async_close_cover(self, **kwargs):
"""Close cover."""
await self._api(self._cover_control.set_state(100))
@property
def is_closed(self):
"""Return if the cover is closed or not."""
return self.current_cover_position == 0
@callback
def _async_start_observe(self, exc=None):
"""Start observation of cover."""
if exc:
self._available = False
self.async_schedule_update_ha_state()
_LOGGER.warning("Observation failed for %s", self._name, exc_info=exc)
try:
cmd = self._cover.observe(
callback=self._observe_update,
err_callback=self._async_start_observe,
duration=0,
)
self.hass.async_create_task(self._api(cmd))
except PytradfriError as err:
_LOGGER.warning("Observation failed, trying again", exc_info=err)
self._async_start_observe()
def _refresh(self, cover):
"""Refresh the cover data."""
self._cover = cover
# Caching of BlindControl and cover object
self._available = cover.reachable
self._cover_control = cover.blind_control
self._cover_data = cover.blind_control.blinds[0]
self._name = cover.name
@callback
def _observe_update(self, tradfri_device):
"""Receive new state data for this cover."""
self._refresh(tradfri_device)
self.async_schedule_update_ha_state()
|
py | 1a3bfa3569420b2b80fdb3f72287a74a2a46a7b8 | import django_heroku # top of the file
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'vl5&8^s6nvai@bnc_3poi8979k@fjy+xpv%^fm!mq!s00-lr7k'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'polls.apps.PollsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
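# Descriptive note (added, hedged): the call below lets django-heroku patch the
# settings defined above via locals() — typically filling DATABASES from the
# DATABASE_URL environment variable and configuring ALLOWED_HOSTS, static file
# serving and logging for Heroku. Exact behaviour depends on the installed
# django-heroku version.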
django_heroku.settings(locals()) # bottom of the file |
py | 1a3bfa502a8ad96be55e6503ebb6258df11bfd6f | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""
Utilities for downloading and building data.
These can be replaced if your particular file system does not support them.
"""
import time
import datetime
import os
import requests
import shutil
from parlai.core.utils import ProgressLogger
def built(path, version_string=None):
"""Checks if '.built' flag has been set for that task.
If a version_string is provided, this has to match, or the version
is regarded as not built.
"""
if version_string:
fname = os.path.join(path, '.built')
if not os.path.isfile(fname):
return False
else:
with open(fname, 'r') as read:
text = read.read().split('\n')
return (len(text) > 1 and text[1] == version_string)
else:
return os.path.isfile(os.path.join(path, '.built'))
def mark_done(path, version_string=None):
"""Marks the path as done by adding a '.built' file with the current
timestamp plus a version description string if specified.
"""
with open(os.path.join(path, '.built'), 'w') as write:
write.write(str(datetime.datetime.today()))
if version_string:
write.write('\n' + version_string)
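# Usage sketch (illustrative, not part of the original module): the typical
# handshake task builders use around built()/mark_done(); the directory
# handling mirrors download_models() further below.
def _example_build(dpath, version='v1.0'):
    """Hypothetical builder bracketing a download with built()/mark_done()."""
    if not built(dpath, version):
        if built(dpath):
            remove_dir(dpath)  # an older version exists; remove outdated files
        make_dir(dpath)
        # download(...) / untar(...) calls for the task data would go here
        mark_done(dpath, version)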
def download(url, path, fname, redownload=False):
"""Downloads file using `requests`. If ``redownload`` is set to false, then
will not download tar file again if it is present (default ``True``)."""
outfile = os.path.join(path, fname)
download = not os.path.isfile(outfile) or redownload
retry = 5
exp_backoff = [2 ** r for r in reversed(range(retry))]
logger = ProgressLogger()
while download and retry >= 0:
resume_file = outfile + '.part'
resume = os.path.isfile(resume_file)
if resume:
resume_pos = os.path.getsize(resume_file)
mode = 'ab'
else:
resume_pos = 0
mode = 'wb'
response = None
with requests.Session() as session:
try:
header = {'Range': 'bytes=%d-' % resume_pos,
'Accept-Encoding': 'identity'} if resume else {}
response = session.get(url, stream=True, timeout=5, headers=header)
# negative reply could be 'none' or just missing
if resume and response.headers.get('Accept-Ranges', 'none') == 'none':
resume_pos = 0
mode = 'wb'
CHUNK_SIZE = 32768
total_size = int(response.headers.get('Content-Length', -1))
# server returns remaining size if resuming, so adjust total
total_size += resume_pos
done = resume_pos
with open(resume_file, mode) as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
if total_size > 0:
done += len(chunk)
if total_size < done:
# don't freak out if content-length was too small
total_size = done
logger.log(done, total_size)
break
except requests.exceptions.ConnectionError:
retry -= 1
print(''.join([' '] * 60), end='\r') # TODO Better way to clean progress bar?
if retry >= 0:
print('Connection error, retrying. (%d retries left)' % retry)
time.sleep(exp_backoff[retry])
else:
print('Retried too many times, stopped retrying.')
finally:
if response:
response.close()
if retry < 0:
raise RuntimeWarning('Connection broken too many times. Stopped retrying.')
if download and retry > 0:
logger.log(done, total_size, force=True)
print()
if done < total_size:
raise RuntimeWarning('Received less data than specified in ' +
'Content-Length header for ' + url + '.' +
' There may be a download problem.')
move(resume_file, outfile)
def make_dir(path):
"""Makes the directory and any nonexistent parent directories."""
os.makedirs(path, exist_ok=True)
def move(path1, path2):
"""Renames the given file."""
shutil.move(path1, path2)
def remove_dir(path):
"""Removes the given directory, if it exists."""
shutil.rmtree(path, ignore_errors=True)
def untar(path, fname, deleteTar=True):
"""Unpacks the given archive file to the same directory, then (by default)
deletes the archive file.
"""
print('unpacking ' + fname)
fullpath = os.path.join(path, fname)
shutil.unpack_archive(fullpath, path)
if deleteTar:
os.remove(fullpath)
def cat(file1, file2, outfile, deleteFiles=True):
with open(outfile, 'wb') as wfd:
for f in [file1, file2]:
with open(f,'rb') as fd:
shutil.copyfileobj(fd, wfd, 1024*1024*10)
#10MB per writing chunk to avoid reading big file into memory.
if deleteFiles:
os.remove(file1)
os.remove(file2)
def _get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def download_from_google_drive(gd_id, destination):
"""Uses the requests package to download a file from Google Drive."""
URL = 'https://docs.google.com/uc?export=download'
with requests.Session() as session:
response = session.get(URL, params={'id': gd_id}, stream=True)
token = _get_confirm_token(response)
if token:
response.close()
params = {'id': gd_id, 'confirm': token}
response = session.get(URL, params=params, stream=True)
CHUNK_SIZE = 32768
with open(destination, 'wb') as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
response.close()
def download_models(opt, fnames, model_folder, version='v1.0', path='aws', use_model_type=False):
"""Download models into the ParlAI model zoo from a url.
fnames -- list of filenames to download
model_folder -- models will be downloaded into models/model_folder/model_type
path -- url for downloading models; defaults to downloading from AWS
use_model_type -- whether models are categorized by type in AWS
"""
model_type = opt.get('model_type', None)
if model_type is not None:
dpath = os.path.join(opt['datapath'], 'models', model_folder, model_type)
else:
dpath = os.path.join(opt['datapath'], 'models', model_folder)
if not built(dpath, version):
for fname in fnames:
print('[building data: ' + dpath + '/' + fname + ']')
if built(dpath):
# An older version exists, so remove these outdated files.
remove_dir(dpath)
make_dir(dpath)
# Download the data.
for fname in fnames:
if path == 'aws':
if use_model_type:
url = 'https://s3.amazonaws.com/fair-data/parlai/_models/' + os.path.join(model_folder, model_type, fname)
else:
url = 'https://s3.amazonaws.com/fair-data/parlai/_models/' + os.path.join(model_folder, fname)
else:
url = path + '/' + fname
download(url, dpath, fname)
if '.tgz' in fname or '.gz' in fname:
untar(dpath, fname)
# Mark the data as built.
mark_done(dpath, version)
|
py | 1a3bfac477a70730787442dd75ba1dfd556e009c | # Lint as: python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for using the MetricsAndPlotsEvaluator API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import string
import numpy as np
import tensorflow as tf
from tensorflow_model_analysis import types
from tensorflow_model_analysis.eval_saved_model import testutil
from tensorflow_model_analysis.metrics import metric_types
from tensorflow_model_analysis.post_export_metrics import metric_keys
from tensorflow_model_analysis.post_export_metrics import post_export_metrics
from tensorflow_model_analysis.proto import metrics_for_slice_pb2
from tensorflow_model_analysis.slicer import slicer_lib as slicer
from tensorflow_model_analysis.writers import metrics_and_plots_serialization
from google.protobuf import text_format
def _make_slice_key(*args):
if len(args) % 2 != 0:
raise ValueError('number of arguments should be even')
result = []
for i in range(0, len(args), 2):
result.append((args[i], args[i + 1]))
result = tuple(result)
return result
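# Example (illustrative): _make_slice_key('age', 5, 'language', 'english')
# returns (('age', 5), ('language', 'english')), i.e. a tuple of
# (column, value) pairs in the form consumed by slicer.serialize_slice_key.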
class EvaluateMetricsAndPlotsTest(testutil.TensorflowModelAnalysisTest):
def setUp(self):
super(EvaluateMetricsAndPlotsTest, self).setUp()
self.longMessage = True # pylint: disable=invalid-name
def testSerializePlots(self):
slice_key = _make_slice_key('fruit', 'apple')
plot_key = metric_types.PlotKey(
name='calibration_plot', output_name='output_name')
calibration_plot = text_format.Parse(
"""
buckets {
lower_threshold_inclusive: -inf
upper_threshold_exclusive: 0.0
num_weighted_examples { value: 0.0 }
total_weighted_label { value: 0.0 }
total_weighted_refined_prediction { value: 0.0 }
}
buckets {
lower_threshold_inclusive: 0.0
upper_threshold_exclusive: 0.5
num_weighted_examples { value: 1.0 }
total_weighted_label { value: 1.0 }
total_weighted_refined_prediction { value: 0.3 }
}
buckets {
lower_threshold_inclusive: 0.5
upper_threshold_exclusive: 1.0
num_weighted_examples { value: 1.0 }
total_weighted_label { value: 0.0 }
total_weighted_refined_prediction { value: 0.7 }
}
buckets {
lower_threshold_inclusive: 1.0
upper_threshold_exclusive: inf
num_weighted_examples { value: 0.0 }
total_weighted_label { value: 0.0 }
total_weighted_refined_prediction { value: 0.0 }
}
""", metrics_for_slice_pb2.CalibrationHistogramBuckets())
expected_plots_for_slice = text_format.Parse(
"""
slice_key {
single_slice_keys {
column: 'fruit'
bytes_value: 'apple'
}
}
plot_keys_and_values {
key {
output_name: "output_name"
}
value {
calibration_histogram_buckets {
buckets {
lower_threshold_inclusive: -inf
upper_threshold_exclusive: 0.0
num_weighted_examples { value: 0.0 }
total_weighted_label { value: 0.0 }
total_weighted_refined_prediction { value: 0.0 }
}
buckets {
lower_threshold_inclusive: 0.0
upper_threshold_exclusive: 0.5
num_weighted_examples { value: 1.0 }
total_weighted_label { value: 1.0 }
total_weighted_refined_prediction { value: 0.3 }
}
buckets {
lower_threshold_inclusive: 0.5
upper_threshold_exclusive: 1.0
num_weighted_examples { value: 1.0 }
total_weighted_label { value: 0.0 }
total_weighted_refined_prediction { value: 0.7 }
}
buckets {
lower_threshold_inclusive: 1.0
upper_threshold_exclusive: inf
num_weighted_examples { value: 0.0 }
total_weighted_label { value: 0.0 }
total_weighted_refined_prediction { value: 0.0 }
}
}
}
}
""", metrics_for_slice_pb2.PlotsForSlice())
got = metrics_and_plots_serialization._serialize_plots((slice_key, {
plot_key: calibration_plot
}), None)
self.assertProtoEquals(expected_plots_for_slice,
metrics_for_slice_pb2.PlotsForSlice.FromString(got))
def testSerializePlotsLegacyStringKeys(self):
slice_key = _make_slice_key('fruit', 'apple')
tfma_plots = {
metric_keys.CALIBRATION_PLOT_MATRICES:
np.array([
[0.0, 0.0, 0.0],
[0.3, 1.0, 1.0],
[0.7, 0.0, 1.0],
[0.0, 0.0, 0.0],
]),
metric_keys.CALIBRATION_PLOT_BOUNDARIES:
np.array([0.0, 0.5, 1.0]),
}
expected_plot_data = """
slice_key {
single_slice_keys {
column: 'fruit'
bytes_value: 'apple'
}
}
plots {
key: "post_export_metrics"
value {
calibration_histogram_buckets {
buckets {
lower_threshold_inclusive: -inf
upper_threshold_exclusive: 0.0
num_weighted_examples { value: 0.0 }
total_weighted_label { value: 0.0 }
total_weighted_refined_prediction { value: 0.0 }
}
buckets {
lower_threshold_inclusive: 0.0
upper_threshold_exclusive: 0.5
num_weighted_examples { value: 1.0 }
total_weighted_label { value: 1.0 }
total_weighted_refined_prediction { value: 0.3 }
}
buckets {
lower_threshold_inclusive: 0.5
upper_threshold_exclusive: 1.0
num_weighted_examples { value: 1.0 }
total_weighted_label { value: 0.0 }
total_weighted_refined_prediction { value: 0.7 }
}
buckets {
lower_threshold_inclusive: 1.0
upper_threshold_exclusive: inf
num_weighted_examples { value: 0.0 }
total_weighted_label { value: 0.0 }
total_weighted_refined_prediction { value: 0.0 }
}
}
}
}
"""
calibration_plot = (
post_export_metrics.calibration_plot_and_prediction_histogram())
serialized = metrics_and_plots_serialization._serialize_plots(
(slice_key, tfma_plots), [calibration_plot])
self.assertProtoEquals(
expected_plot_data,
metrics_for_slice_pb2.PlotsForSlice.FromString(serialized))
def testSerializePlots_emptyPlot(self):
slice_key = _make_slice_key('fruit', 'apple')
tfma_plots = {metric_keys.ERROR_METRIC: 'error_message'}
actual_plot = metrics_and_plots_serialization._serialize_plots(
(slice_key, tfma_plots), [])
expected_plot = metrics_for_slice_pb2.PlotsForSlice()
expected_plot.slice_key.CopyFrom(slicer.serialize_slice_key(slice_key))
expected_plot.plots[
metric_keys.ERROR_METRIC].debug_message = 'error_message'
self.assertProtoEquals(
expected_plot,
metrics_for_slice_pb2.PlotsForSlice.FromString(actual_plot))
def testSerializeConfusionMatrices(self):
slice_key = _make_slice_key()
thresholds = [0.25, 0.75, 1.00]
matrices = [[0.0, 1.0, 0.0, 2.0, 1.0, 1.0], [1.0, 1.0, 0.0, 1.0, 1.0, 0.5],
[2.0, 1.0, 0.0, 0.0, float('nan'), 0.0]]
slice_metrics = {
metric_keys.CONFUSION_MATRIX_AT_THRESHOLDS_MATRICES: matrices,
metric_keys.CONFUSION_MATRIX_AT_THRESHOLDS_THRESHOLDS: thresholds,
}
expected_metrics_for_slice = text_format.Parse(
"""
slice_key {}
metrics {
key: "post_export_metrics/confusion_matrix_at_thresholds"
value {
confusion_matrix_at_thresholds {
matrices {
threshold: 0.25
false_negatives: 0.0
true_negatives: 1.0
false_positives: 0.0
true_positives: 2.0
precision: 1.0
recall: 1.0
bounded_false_negatives {
value {
value: 0.0
}
}
bounded_true_negatives {
value {
value: 1.0
}
}
bounded_true_positives {
value {
value: 2.0
}
}
bounded_false_positives {
value {
value: 0.0
}
}
bounded_precision {
value {
value: 1.0
}
}
bounded_recall {
value {
value: 1.0
}
}
t_distribution_false_negatives {
unsampled_value {
value: 0.0
}
}
t_distribution_true_negatives {
unsampled_value {
value: 1.0
}
}
t_distribution_true_positives {
unsampled_value {
value: 2.0
}
}
t_distribution_false_positives {
unsampled_value {
value: 0.0
}
}
t_distribution_precision {
unsampled_value {
value: 1.0
}
}
t_distribution_recall {
unsampled_value {
value: 1.0
}
}
}
matrices {
threshold: 0.75
false_negatives: 1.0
true_negatives: 1.0
false_positives: 0.0
true_positives: 1.0
precision: 1.0
recall: 0.5
bounded_false_negatives {
value {
value: 1.0
}
}
bounded_true_negatives {
value {
value: 1.0
}
}
bounded_true_positives {
value {
value: 1.0
}
}
bounded_false_positives {
value {
value: 0.0
}
}
bounded_precision {
value {
value: 1.0
}
}
bounded_recall {
value {
value: 0.5
}
}
t_distribution_false_negatives {
unsampled_value {
value: 1.0
}
}
t_distribution_true_negatives {
unsampled_value {
value: 1.0
}
}
t_distribution_true_positives {
unsampled_value {
value: 1.0
}
}
t_distribution_false_positives {
unsampled_value {
value: 0.0
}
}
t_distribution_precision {
unsampled_value {
value: 1.0
}
}
t_distribution_recall {
unsampled_value {
value: 0.5
}
}
}
matrices {
threshold: 1.00
false_negatives: 2.0
true_negatives: 1.0
false_positives: 0.0
true_positives: 0.0
precision: nan
recall: 0.0
bounded_false_negatives {
value {
value: 2.0
}
}
bounded_true_negatives {
value {
value: 1.0
}
}
bounded_true_positives {
value {
value: 0.0
}
}
bounded_false_positives {
value {
value: 0.0
}
}
bounded_precision {
value {
value: nan
}
}
bounded_recall {
value {
value: 0.0
}
}
t_distribution_false_negatives {
unsampled_value {
value: 2.0
}
}
t_distribution_true_negatives {
unsampled_value {
value: 1.0
}
}
t_distribution_true_positives {
unsampled_value {
value: 0.0
}
}
t_distribution_false_positives {
unsampled_value {
value: 0.0
}
}
t_distribution_precision {
unsampled_value {
value: nan
}
}
t_distribution_recall {
unsampled_value {
value: 0.0
}
}
}
}
}
}
""", metrics_for_slice_pb2.MetricsForSlice())
got = metrics_and_plots_serialization.convert_slice_metrics_to_proto(
(slice_key, slice_metrics),
[post_export_metrics.confusion_matrix_at_thresholds(thresholds)])
self.assertProtoEquals(expected_metrics_for_slice, got)
def testSerializeMetricsRanges(self):
slice_key = _make_slice_key('age', 5, 'language', 'english', 'price', 0.3)
slice_metrics = {
'accuracy': types.ValueWithTDistribution(0.8, 0.1, 9, 0.8),
metric_keys.AUPRC: 0.1,
metric_keys.lower_bound_key(metric_keys.AUPRC): 0.05,
metric_keys.upper_bound_key(metric_keys.AUPRC): 0.17,
metric_keys.AUC: 0.2,
metric_keys.lower_bound_key(metric_keys.AUC): 0.1,
metric_keys.upper_bound_key(metric_keys.AUC): 0.3
}
expected_metrics_for_slice = text_format.Parse(
string.Template("""
slice_key {
single_slice_keys {
column: 'age'
int64_value: 5
}
single_slice_keys {
column: 'language'
bytes_value: 'english'
}
single_slice_keys {
column: 'price'
float_value: 0.3
}
}
metrics {
key: "accuracy"
value {
bounded_value {
value {
value: 0.8
}
lower_bound {
value: 0.5737843
}
upper_bound {
value: 1.0262157
}
methodology: POISSON_BOOTSTRAP
}
confidence_interval {
lower_bound {
value: 0.5737843
}
upper_bound {
value: 1.0262157
}
t_distribution_value {
sample_mean {
value: 0.8
}
sample_standard_deviation {
value: 0.1
}
sample_degrees_of_freedom {
value: 9
}
unsampled_value {
value: 0.8
}
}
}
}
}
metrics {
key: "$auc"
value {
bounded_value {
lower_bound {
value: 0.1
}
upper_bound {
value: 0.3
}
value {
value: 0.2
}
methodology: RIEMANN_SUM
}
}
}
metrics {
key: "$auprc"
value {
bounded_value {
lower_bound {
value: 0.05
}
upper_bound {
value: 0.17
}
value {
value: 0.1
}
methodology: RIEMANN_SUM
}
}
}""").substitute(auc=metric_keys.AUC, auprc=metric_keys.AUPRC),
metrics_for_slice_pb2.MetricsForSlice())
got = metrics_and_plots_serialization.convert_slice_metrics_to_proto(
(slice_key, slice_metrics),
[post_export_metrics.auc(),
post_export_metrics.auc(curve='PR')])
self.assertProtoEquals(expected_metrics_for_slice, got)
def testSerializeMetrics(self):
slice_key = _make_slice_key('age', 5, 'language', 'english', 'price', 0.3)
slice_metrics = {
metric_types.MetricKey(name='accuracy', output_name='output_name'): 0.8
}
expected_metrics_for_slice = text_format.Parse(
"""
slice_key {
single_slice_keys {
column: 'age'
int64_value: 5
}
single_slice_keys {
column: 'language'
bytes_value: 'english'
}
single_slice_keys {
column: 'price'
float_value: 0.3
}
}
metric_keys_and_values {
key {
name: "accuracy"
output_name: "output_name"
}
value {
double_value {
value: 0.8
}
}
}""", metrics_for_slice_pb2.MetricsForSlice())
got = metrics_and_plots_serialization.convert_slice_metrics_to_proto(
(slice_key, slice_metrics), None)
self.assertProtoEquals(expected_metrics_for_slice, got)
def testSerializeMetricsFromLegacyStrings(self):
slice_key = _make_slice_key('age', 5, 'language', 'english', 'price', 0.3)
slice_metrics = {
'accuracy': 0.8,
metric_keys.AUPRC: 0.1,
metric_keys.lower_bound_key(metric_keys.AUPRC): 0.05,
metric_keys.upper_bound_key(metric_keys.AUPRC): 0.17,
metric_keys.AUC: 0.2,
metric_keys.lower_bound_key(metric_keys.AUC): 0.1,
metric_keys.upper_bound_key(metric_keys.AUC): 0.3
}
expected_metrics_for_slice = text_format.Parse(
string.Template("""
slice_key {
single_slice_keys {
column: 'age'
int64_value: 5
}
single_slice_keys {
column: 'language'
bytes_value: 'english'
}
single_slice_keys {
column: 'price'
float_value: 0.3
}
}
metrics {
key: "accuracy"
value {
double_value {
value: 0.8
}
}
}
metrics {
key: "$auc"
value {
bounded_value {
lower_bound {
value: 0.1
}
upper_bound {
value: 0.3
}
value {
value: 0.2
}
methodology: RIEMANN_SUM
}
}
}
metrics {
key: "$auprc"
value {
bounded_value {
lower_bound {
value: 0.05
}
upper_bound {
value: 0.17
}
value {
value: 0.1
}
methodology: RIEMANN_SUM
}
}
}""").substitute(auc=metric_keys.AUC, auprc=metric_keys.AUPRC),
metrics_for_slice_pb2.MetricsForSlice())
got = metrics_and_plots_serialization.convert_slice_metrics_to_proto(
(slice_key, slice_metrics),
[post_export_metrics.auc(),
post_export_metrics.auc(curve='PR')])
self.assertProtoEquals(expected_metrics_for_slice, got)
def testSerializeMetrics_emptyMetrics(self):
slice_key = _make_slice_key('age', 5, 'language', 'english', 'price', 0.3)
slice_metrics = {metric_keys.ERROR_METRIC: 'error_message'}
actual_metrics = (
metrics_and_plots_serialization.convert_slice_metrics_to_proto(
(slice_key, slice_metrics),
[post_export_metrics.auc(),
post_export_metrics.auc(curve='PR')]))
expected_metrics = metrics_for_slice_pb2.MetricsForSlice()
expected_metrics.slice_key.CopyFrom(slicer.serialize_slice_key(slice_key))
expected_metrics.metrics[
metric_keys.ERROR_METRIC].debug_message = 'error_message'
self.assertProtoEquals(expected_metrics, actual_metrics)
def testStringMetrics(self):
slice_key = _make_slice_key()
slice_metrics = {
'valid_ascii': b'test string',
'valid_unicode': b'\xF0\x9F\x90\x84', # U+1F404, Cow
'invalid_unicode': b'\xE2\x28\xA1',
}
expected_metrics_for_slice = metrics_for_slice_pb2.MetricsForSlice()
expected_metrics_for_slice.slice_key.SetInParent()
expected_metrics_for_slice.metrics[
'valid_ascii'].bytes_value = slice_metrics['valid_ascii']
expected_metrics_for_slice.metrics[
'valid_unicode'].bytes_value = slice_metrics['valid_unicode']
expected_metrics_for_slice.metrics[
'invalid_unicode'].bytes_value = slice_metrics['invalid_unicode']
got = metrics_and_plots_serialization.convert_slice_metrics_to_proto(
(slice_key, slice_metrics), [])
self.assertProtoEquals(expected_metrics_for_slice, got)
def testUncertaintyValuedMetrics(self):
slice_key = _make_slice_key()
slice_metrics = {
'one_dim':
types.ValueWithTDistribution(2.0, 1.0, 3, 2.0),
'nans':
types.ValueWithTDistribution(
float('nan'), float('nan'), -1, float('nan')),
}
expected_metrics_for_slice = text_format.Parse(
"""
slice_key {}
metrics {
key: "one_dim"
value {
bounded_value {
value {
value: 2.0
}
lower_bound {
value: -1.1824463
}
upper_bound {
value: 5.1824463
}
methodology: POISSON_BOOTSTRAP
}
confidence_interval {
lower_bound {
value: -1.1824463
}
upper_bound {
value: 5.1824463
}
t_distribution_value {
sample_mean {
value: 2.0
}
sample_standard_deviation {
value: 1.0
}
sample_degrees_of_freedom {
value: 3
}
unsampled_value {
value: 2.0
}
}
}
}
}
metrics {
key: "nans"
value {
bounded_value {
value {
value: nan
}
lower_bound {
value: nan
}
upper_bound {
value: nan
}
methodology: POISSON_BOOTSTRAP
}
confidence_interval {
lower_bound {
value: nan
}
upper_bound {
value: nan
}
t_distribution_value {
sample_mean {
value: nan
}
sample_standard_deviation {
value: nan
}
sample_degrees_of_freedom {
value: -1
}
unsampled_value {
value: nan
}
}
}
}
}
""", metrics_for_slice_pb2.MetricsForSlice())
got = metrics_and_plots_serialization.convert_slice_metrics_to_proto(
(slice_key, slice_metrics), [])
self.assertProtoEquals(expected_metrics_for_slice, got)
def testTensorValuedMetrics(self):
slice_key = _make_slice_key()
slice_metrics = {
'one_dim':
np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32),
'two_dims':
np.array([['two', 'dims', 'test'], ['TWO', 'DIMS', 'TEST']]),
'three_dims':
np.array([[[100, 200, 300]], [[500, 600, 700]]], dtype=np.int64),
}
expected_metrics_for_slice = text_format.Parse(
"""
slice_key {}
metrics {
key: "one_dim"
value {
array_value {
data_type: FLOAT32
shape: 4
float32_values: [1.0, 2.0, 3.0, 4.0]
}
}
}
metrics {
key: "two_dims"
value {
array_value {
data_type: BYTES
shape: [2, 3]
bytes_values: ["two", "dims", "test", "TWO", "DIMS", "TEST"]
}
}
}
metrics {
key: "three_dims"
value {
array_value {
data_type: INT64
shape: [2, 1, 3]
int64_values: [100, 200, 300, 500, 600, 700]
}
}
}
""", metrics_for_slice_pb2.MetricsForSlice())
got = metrics_and_plots_serialization.convert_slice_metrics_to_proto(
(slice_key, slice_metrics), [])
self.assertProtoEquals(expected_metrics_for_slice, got)
if __name__ == '__main__':
tf.test.main()
|
py | 1a3bfd09295fa0edb757d2253aca13bfd08d1dcb | '''
A Keras port of the original Caffe SSD300 network.
Copyright (C) 2018 Pierluigi Ferrari
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from __future__ import division
import numpy as np
from keras.models import Model
from keras.layers import Input, Lambda, Activation, Conv2D, MaxPooling2D, ZeroPadding2D, Reshape, Concatenate, SeparableConv2D, Dropout, BatchNormalization
from keras.layers import DepthwiseConv2D, AveragePooling2D, Add
import keras.backend as K
from keras_layers.keras_layer_AnchorBoxes import AnchorBoxes
from keras_layers.keras_layer_DecodeDetections import DecodeDetections
from keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast
import sys, os
import light_networks.shufflenetv2_relu6_se_no_shuffle.shufflenetv2 as shufflenet_v2
import light_networks.shufflenetv2_relu6_se_no_shuffle.utils as utils
def ssd_300(image_size,
n_classes,
input_tensor = None,
mode='training',
scale_factor=1,
min_scale=None,
max_scale=None,
scales=None,
aspect_ratios_global=None,
aspect_ratios_per_layer=[[1.0, 2.0, 0.5],
[1.0, 2.0, 0.5, 3.0, 1.0/3.0],
[1.0, 2.0, 0.5, 3.0, 1.0/3.0],
[1.0, 2.0, 0.5, 3.0, 1.0/3.0],
[1.0, 2.0, 0.5],
[1.0, 2.0, 0.5]],
two_boxes_for_ar1=True,
steps=[8, 16, 32, 64, 100, 300],
offsets=None,
clip_boxes=False,
variances=[0.1, 0.1, 0.2, 0.2],
coords='centroids',
normalize_coords=True,
subtract_mean=[123, 117, 104],
divide_by_stddev=None,
swap_channels=[2, 1, 0],
confidence_thresh=0.01,
iou_threshold=0.45,
top_k=200,
nms_max_output_size=400,
return_predictor_sizes=False):
'''
Build a Keras model with SSD300 architecture, see references.
The base network is a reduced atrous VGG-16, extended by the SSD architecture,
as described in the paper.
Most of the arguments that this function takes are only needed for the anchor
box layers. In case you're training the network, the parameters passed here must
be the same as the ones used to set up `SSDBoxEncoder`. In case you're loading
trained weights, the parameters passed here must be the same as the ones used
to produce the trained weights.
Some of these arguments are explained in more detail in the documentation of the
`SSDBoxEncoder` class.
Note: Requires Keras v2.0 or later. Currently works only with the
TensorFlow backend (v1.0 or later).
Arguments:
image_size (tuple): The input image size in the format `(height, width, channels)`.
input_tensor: Tensor with shape (batch, height, width, channels)
n_classes (int): The number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO.
mode (str, optional): One of 'training', 'inference' and 'inference_fast'. In 'training' mode,
the model outputs the raw prediction tensor, while in 'inference' and 'inference_fast' modes,
the raw predictions are decoded into absolute coordinates and filtered via confidence thresholding,
non-maximum suppression, and top-k filtering. The difference between latter two modes is that
'inference' follows the exact procedure of the original Caffe implementation, while
'inference_fast' uses a faster prediction decoding procedure.
min_scale (float, optional): The smallest scaling factor for the size of the anchor boxes as a fraction
of the shorter side of the input images.
max_scale (float, optional): The largest scaling factor for the size of the anchor boxes as a fraction
of the shorter side of the input images. All scaling factors between the smallest and the
largest will be linearly interpolated. Note that the second to last of the linearly interpolated
scaling factors will actually be the scaling factor for the last predictor layer, while the last
scaling factor is used for the second box for aspect ratio 1 in the last predictor layer
if `two_boxes_for_ar1` is `True`.
scales (list, optional): A list of floats containing scaling factors per convolutional predictor layer.
This list must be one element longer than the number of predictor layers. The first `k` elements are the
scaling factors for the `k` predictor layers, while the last element is used for the second box
for aspect ratio 1 in the last predictor layer if `two_boxes_for_ar1` is `True`. This additional
last scaling factor must be passed either way, even if it is not being used. If a list is passed,
this argument overrides `min_scale` and `max_scale`. All scaling factors must be greater than zero.
aspect_ratios_global (list, optional): The list of aspect ratios for which anchor boxes are to be
generated. This list is valid for all prediction layers.
aspect_ratios_per_layer (list, optional): A list containing one aspect ratio list for each prediction layer.
This allows you to set the aspect ratios for each predictor layer individually, which is the case for the
original SSD300 implementation. If a list is passed, it overrides `aspect_ratios_global`.
two_boxes_for_ar1 (bool, optional): Only relevant for aspect ratio lists that contain 1. Will be ignored otherwise.
If `True`, two anchor boxes will be generated for aspect ratio 1. The first will be generated
using the scaling factor for the respective layer, the second one will be generated using
geometric mean of said scaling factor and next bigger scaling factor.
steps (list, optional): `None` or a list with as many elements as there are predictor layers. The elements can be
either ints/floats or tuples of two ints/floats. These numbers represent for each predictor layer how many
pixels apart the anchor box center points should be vertically and horizontally along the spatial grid over
the image. If the list contains ints/floats, then that value will be used for both spatial dimensions.
If the list contains tuples of two ints/floats, then they represent `(step_height, step_width)`.
If no steps are provided, then they will be computed such that the anchor box center points will form an
equidistant grid within the image dimensions.
offsets (list, optional): `None` or a list with as many elements as there are predictor layers. The elements can be
either floats or tuples of two floats. These numbers represent for each predictor layer how many
pixels from the top and left boarders of the image the top-most and left-most anchor box center points should be
as a fraction of `steps`. The last bit is important: The offsets are not absolute pixel values, but fractions
of the step size specified in the `steps` argument. If the list contains floats, then that value will
be used for both spatial dimensions. If the list contains tuples of two floats, then they represent
`(vertical_offset, horizontal_offset)`. If no offsets are provided, then they will default to 0.5 of the step size.
clip_boxes (bool, optional): If `True`, clips the anchor box coordinates to stay within image boundaries.
variances (list, optional): A list of 4 floats >0. The anchor box offset for each coordinate will be divided by
its respective variance value.
coords (str, optional): The box coordinate format to be used internally by the model (i.e. this is not the input format
of the ground truth labels). Can be either 'centroids' for the format `(cx, cy, w, h)` (box center coordinates, width,
and height), 'minmax' for the format `(xmin, xmax, ymin, ymax)`, or 'corners' for the format `(xmin, ymin, xmax, ymax)`.
normalize_coords (bool, optional): Set to `True` if the model is supposed to use relative instead of absolute coordinates,
i.e. if the model predicts box coordinates within [0,1] instead of absolute coordinates.
subtract_mean (array-like, optional): `None` or an array-like object of integers or floating point values
of any shape that is broadcast-compatible with the image shape. The elements of this array will be
subtracted from the image pixel intensity values. For example, pass a list of three integers
to perform per-channel mean normalization for color images.
divide_by_stddev (array-like, optional): `None` or an array-like object of non-zero integers or
floating point values of any shape that is broadcast-compatible with the image shape. The image pixel
intensity values will be divided by the elements of this array. For example, pass a list
of three integers to perform per-channel standard deviation normalization for color images.
swap_channels (list, optional): Either `False` or a list of integers representing the desired order in which the input
image channels should be swapped.
confidence_thresh (float, optional): A float in [0,1), the minimum classification confidence in a specific
positive class in order to be considered for the non-maximum suppression stage for the respective class.
A lower value will result in a larger part of the selection process being done by the non-maximum suppression
stage, while a larger value will result in a larger part of the selection process happening in the confidence
thresholding stage.
iou_threshold (float, optional): A float in [0,1]. All boxes that have a Jaccard similarity of greater than `iou_threshold`
with a locally maximal box will be removed from the set of predictions for a given class, where 'maximal' refers
to the box's confidence score.
top_k (int, optional): The number of highest scoring predictions to be kept for each batch item after the
non-maximum suppression stage.
nms_max_output_size (int, optional): The maximal number of predictions that will be left over after the NMS stage.
return_predictor_sizes (bool, optional): If `True`, this function not only returns the model, but also
a list containing the spatial dimensions of the predictor layers. This isn't strictly necessary since
you can always get their sizes easily via the Keras API, but it's convenient and less error-prone
to get them this way. They are only relevant for training anyway (SSDBoxEncoder needs to know the
spatial dimensions of the predictor layers), for inference you don't need them.
Returns:
model: The Keras SSD300 model.
predictor_sizes (optional): A Numpy array containing the `(height, width)` portion
of the output tensor shape for each convolutional predictor layer. During
training, the generator function needs this in order to transform
the ground truth labels into tensors of identical structure as the
output tensors of the model, which is in turn needed for the cost
function.
References:
https://arxiv.org/abs/1512.02325v5
'''
n_predictor_layers = 6 # The number of predictor conv layers in the network is 6 for the original SSD300.
n_classes += 1 # Account for the background class.
img_height, img_width, img_channels = image_size[0], image_size[1], image_size[2]
############################################################################
# Get a few exceptions out of the way.
############################################################################
if aspect_ratios_global is None and aspect_ratios_per_layer is None:
raise ValueError("`aspect_ratios_global` and `aspect_ratios_per_layer` cannot both be None. At least one needs to be specified.")
if aspect_ratios_per_layer:
if len(aspect_ratios_per_layer) != n_predictor_layers:
raise ValueError("It must be either aspect_ratios_per_layer is None or len(aspect_ratios_per_layer) == {}, but len(aspect_ratios_per_layer) == {}.".format(n_predictor_layers, len(aspect_ratios_per_layer)))
if (min_scale is None or max_scale is None) and scales is None:
raise ValueError("Either `min_scale` and `max_scale` or `scales` need to be specified.")
if scales:
if len(scales) != n_predictor_layers+1:
raise ValueError("It must be either scales is None or len(scales) == {}, but len(scales) == {}.".format(n_predictor_layers+1, len(scales)))
else: # If no explicit list of scaling factors was passed, compute the list of scaling factors from `min_scale` and `max_scale`
scales = np.linspace(min_scale, max_scale, n_predictor_layers+1)
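# e.g. (illustrative) min_scale=0.2, max_scale=0.9 gives
# scales ~= [0.2, 0.317, 0.433, 0.55, 0.667, 0.783, 0.9]:
# seven values, one per predictor layer plus the extra last scale.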
if len(variances) != 4:
raise ValueError("4 variance values must be pased, but {} values were received.".format(len(variances)))
variances = np.array(variances)
if np.any(variances <= 0):
raise ValueError("All variances must be >0, but the variances given are {}".format(variances))
if (not (steps is None)) and (len(steps) != n_predictor_layers):
raise ValueError("You must provide at least one step value per predictor layer.")
if (not (offsets is None)) and (len(offsets) != n_predictor_layers):
raise ValueError("You must provide at least one offset value per predictor layer.")
############################################################################
# Compute the anchor box parameters.
############################################################################
# Set the aspect ratios for each predictor layer. These are only needed for the anchor box layers.
if aspect_ratios_per_layer:
aspect_ratios = aspect_ratios_per_layer
else:
aspect_ratios = [aspect_ratios_global] * n_predictor_layers
# Compute the number of boxes to be predicted per cell for each predictor layer.
# We need this so that we know how many channels the predictor layers need to have.
if aspect_ratios_per_layer:
n_boxes = []
for ar in aspect_ratios_per_layer:
if (1 in ar) & two_boxes_for_ar1:
n_boxes.append(len(ar) + 1) # +1 for the second box for aspect ratio 1
else:
n_boxes.append(len(ar))
else: # If only a global aspect ratio list was passed, then the number of boxes is the same for each predictor layer
if (1 in aspect_ratios_global) & two_boxes_for_ar1:
n_boxes = len(aspect_ratios_global) + 1
else:
n_boxes = len(aspect_ratios_global)
n_boxes = [n_boxes] * n_predictor_layers
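# e.g. (illustrative) with the default aspect_ratios_per_layer above and
# two_boxes_for_ar1=True, this computation yields n_boxes == [4, 6, 6, 6, 4, 4].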
if steps is None:
steps = [None] * n_predictor_layers
if offsets is None:
offsets = [None] * n_predictor_layers
############################################################################
# Define functions for the Lambda layers below.
############################################################################
def identity_layer(tensor):
return tensor
def input_mean_normalization(tensor):
return tensor - np.array(subtract_mean)
def input_stddev_normalization(tensor):
return tensor / np.array(divide_by_stddev)
def input_channel_swap(tensor):
if len(swap_channels) == 3:
return K.stack([tensor[...,swap_channels[0]], tensor[...,swap_channels[1]], tensor[...,swap_channels[2]]], axis=-1)
elif len(swap_channels) == 4:
return K.stack([tensor[...,swap_channels[0]], tensor[...,swap_channels[1]], tensor[...,swap_channels[2]], tensor[...,swap_channels[3]]], axis=-1)
def relu6(x):
return K.relu(x, max_value=6)
############################################################################
# Build the network.
############################################################################
if input_tensor != None:
x = Input(tensor=input_tensor, shape=(img_height, img_width, img_channels))
else:
x = Input(shape=(img_height, img_width, img_channels))
# The following identity layer is only needed so that the subsequent lambda layers can be optional.
x1 = Lambda(identity_layer, output_shape=(img_height, img_width, img_channels), name='identity_layer')(x)
if not (divide_by_stddev is None):
x1 = Lambda(input_stddev_normalization, output_shape=(img_height, img_width, img_channels), name='input_stddev_normalization')(x1)
if not (subtract_mean is None):
x1 = Lambda(input_mean_normalization, output_shape=(img_height, img_width, img_channels), name='input_mean_normalization')(x1)
if swap_channels:
x1 = Lambda(input_channel_swap, output_shape=(img_height, img_width, img_channels), name='input_channel_swap')(x1)
num_shuffle_units = list([3,7,3])
out_dim_stage_two = {0.5:48, 1:116, 1.5:176, 2:244}
exp = np.insert(np.arange(len(num_shuffle_units), dtype=np.float32), 0, 0) # [0., 0., 1., 2.]
out_channels_in_stage = 2**exp
out_channels_in_stage *= out_dim_stage_two[scale_factor] # calculate output channels for each stage
out_channels_in_stage[0] = 24 # first stage has always 24 output channels
out_channels_in_stage = out_channels_in_stage.astype(int)
# change last conv
if scale_factor == 2:
k = 2048
else:
k = 1024
# elif scale_factor == 1.5:
# k = 768
# elif scale_factor == 1:
# k = 512
# else:
# k = 256
# Get shufflenet architecture
shufflenetv2 = shufflenet_v2.ShuffleNetV2(bottleneck_ratio=scale_factor,
input_shape=(img_height, img_width, img_channels),
include_top=False)
FeatureExtractor = Model(inputs=shufflenetv2.input, outputs=shufflenetv2.get_layer('stage3/block8/concat_1').output)
# Stage 3 last block unit
shuffle_unit13 = FeatureExtractor(x1)
layer = utils.shuffle_unit(shuffle_unit13, out_channels=out_channels_in_stage[4-1], strides=2,
bottleneck_ratio=scale_factor, stage=4, block=1)
conv18 = Conv2D(k, kernel_size=1, padding='same', strides=1, name='1x1conv5_out', activation=relu6)(layer)
conv19_2 = utils.shuffle_unit(conv18, out_channels=512, strides=2,
bottleneck_ratio=scale_factor, stage=5, block=1)
conv20_2 = utils.shuffle_unit(conv19_2, out_channels=256, strides=2,
bottleneck_ratio=scale_factor, stage=5, block=2)
conv21_2 = utils.shuffle_unit(conv20_2, out_channels=256, strides=2,
bottleneck_ratio=scale_factor, stage=5, block=3)
conv22_2 = utils.shuffle_unit(conv21_2, out_channels=128, strides=2,
bottleneck_ratio=scale_factor, stage=5, block=4)
### Build the convolutional predictor layers on top of the base network
# We predict `n_classes` confidence values for each box, hence the confidence predictors have depth `n_boxes * n_classes`
# Output shape of the confidence layers: `(batch, height, width, n_boxes * n_classes)`
conv13_mbox_conf = Conv2D(n_boxes[0] * n_classes, (3, 3), padding='same', name='conv13_mbox_conf')(shuffle_unit13)
conv18_mbox_conf = Conv2D(n_boxes[1] * n_classes, (3, 3), padding='same', name='conv18_mbox_conf')(conv18)
conv19_2_mbox_conf = Conv2D(n_boxes[2] * n_classes, (3, 3), padding='same', name='conv19_2_mbox_conf')(conv19_2)
conv20_2_mbox_conf = Conv2D(n_boxes[3] * n_classes, (3, 3), padding='same', name='conv20_2_mbox_conf')(conv20_2)
conv21_2_mbox_conf = Conv2D(n_boxes[4] * n_classes, (3, 3), padding='same', name='conv21_2_mbox_conf')(conv21_2)
conv22_2_mbox_conf = Conv2D(n_boxes[5] * n_classes, (3, 3), padding='same', name='conv22_2_mbox_conf')(conv22_2)
# We predict 4 box coordinates for each box, hence the localization predictors have depth `n_boxes * 4`
# Output shape of the localization layers: `(batch, height, width, n_boxes * 4)`
conv13_mbox_loc = Conv2D(n_boxes[0] * 4, (3, 3), padding='same', name='conv13_mbox_loc')(shuffle_unit13)
conv18_mbox_loc = Conv2D(n_boxes[1] * 4, (3, 3), padding='same', name='conv18_mbox_loc')(conv18)
conv19_2_mbox_loc = Conv2D(n_boxes[2] * 4, (3, 3), padding='same', name='conv19_2_mbox_loc')(conv19_2)
conv20_2_mbox_loc = Conv2D(n_boxes[3] * 4, (3, 3), padding='same', name='conv20_2_mbox_loc')(conv20_2)
conv21_2_mbox_loc = Conv2D(n_boxes[4] * 4, (3, 3), padding='same', name='conv21_2_mbox_loc')(conv21_2)
conv22_2_mbox_loc = Conv2D(n_boxes[5] * 4, (3, 3), padding='same', name='conv22_2_mbox_loc')(conv22_2)
### Generate the anchor boxes (called "priors" in the original Caffe/C++ implementation, so I'll keep their layer names)
# Output shape of anchors: `(batch, height, width, n_boxes, 8)`
conv13_mbox_priorbox = AnchorBoxes(img_height, img_width, this_scale=scales[0], next_scale=scales[1], aspect_ratios=aspect_ratios[0],
two_boxes_for_ar1=two_boxes_for_ar1, this_steps=steps[0], this_offsets=offsets[0], clip_boxes=clip_boxes,
variances=variances, coords=coords, normalize_coords=normalize_coords, name='conv13_mbox_priorbox')(conv13_mbox_loc)
conv18_mbox_priorbox = AnchorBoxes(img_height, img_width, this_scale=scales[1], next_scale=scales[2], aspect_ratios=aspect_ratios[1],
two_boxes_for_ar1=two_boxes_for_ar1, this_steps=steps[1], this_offsets=offsets[1], clip_boxes=clip_boxes,
variances=variances, coords=coords, normalize_coords=normalize_coords, name='conv18_mbox_priorbox')(conv18_mbox_loc)
conv19_2_mbox_priorbox = AnchorBoxes(img_height, img_width, this_scale=scales[2], next_scale=scales[3], aspect_ratios=aspect_ratios[2],
two_boxes_for_ar1=two_boxes_for_ar1, this_steps=steps[2], this_offsets=offsets[2], clip_boxes=clip_boxes,
variances=variances, coords=coords, normalize_coords=normalize_coords, name='conv19_2_mbox_priorbox')(conv19_2_mbox_loc)
conv20_2_mbox_priorbox = AnchorBoxes(img_height, img_width, this_scale=scales[3], next_scale=scales[4], aspect_ratios=aspect_ratios[3],
two_boxes_for_ar1=two_boxes_for_ar1, this_steps=steps[3], this_offsets=offsets[3], clip_boxes=clip_boxes,
variances=variances, coords=coords, normalize_coords=normalize_coords, name='conv20_2_mbox_priorbox')(conv20_2_mbox_loc)
conv21_2_mbox_priorbox = AnchorBoxes(img_height, img_width, this_scale=scales[4], next_scale=scales[5], aspect_ratios=aspect_ratios[4],
two_boxes_for_ar1=two_boxes_for_ar1, this_steps=steps[4], this_offsets=offsets[4], clip_boxes=clip_boxes,
variances=variances, coords=coords, normalize_coords=normalize_coords, name='conv21_2_mbox_priorbox')(conv21_2_mbox_loc)
conv22_2_mbox_priorbox = AnchorBoxes(img_height, img_width, this_scale=scales[5], next_scale=scales[6], aspect_ratios=aspect_ratios[5],
two_boxes_for_ar1=two_boxes_for_ar1, this_steps=steps[5], this_offsets=offsets[5], clip_boxes=clip_boxes,
variances=variances, coords=coords, normalize_coords=normalize_coords, name='conv22_2_mbox_priorbox')(conv22_2_mbox_loc)
### Reshape
# Reshape the class predictions, yielding 3D tensors of shape `(batch, height * width * n_boxes, n_classes)`
# We want the classes isolated in the last axis to perform softmax on them
conv13_mbox_conf_reshape = Reshape((-1, n_classes), name='conv13_mbox_conf_reshape')(conv13_mbox_conf)
conv18_mbox_conf_reshape = Reshape((-1, n_classes), name='conv18_mbox_conf_reshape')(conv18_mbox_conf)
conv19_2_mbox_conf_reshape = Reshape((-1, n_classes), name='conv19_2_mbox_conf_reshape')(conv19_2_mbox_conf)
conv20_2_mbox_conf_reshape = Reshape((-1, n_classes), name='conv20_2_mbox_conf_reshape')(conv20_2_mbox_conf)
conv21_2_mbox_conf_reshape = Reshape((-1, n_classes), name='conv21_2_mbox_conf_reshape')(conv21_2_mbox_conf)
conv22_2_mbox_conf_reshape = Reshape((-1, n_classes), name='conv22_2_mbox_conf_reshape')(conv22_2_mbox_conf)
# Reshape the box predictions, yielding 3D tensors of shape `(batch, height * width * n_boxes, 4)`
# We want the four box coordinates isolated in the last axis to compute the smooth L1 loss
conv13_mbox_loc_reshape = Reshape((-1, 4), name='conv13_mbox_loc_reshape')(conv13_mbox_loc)
conv18_mbox_loc_reshape = Reshape((-1, 4), name='conv18_mbox_loc_reshape')(conv18_mbox_loc)
conv19_2_mbox_loc_reshape = Reshape((-1, 4), name='conv19_2_mbox_loc_reshape')(conv19_2_mbox_loc)
conv20_2_mbox_loc_reshape = Reshape((-1, 4), name='conv20_2_mbox_loc_reshape')(conv20_2_mbox_loc)
conv21_2_mbox_loc_reshape = Reshape((-1, 4), name='conv21_2_mbox_loc_reshape')(conv21_2_mbox_loc)
conv22_2_mbox_loc_reshape = Reshape((-1, 4), name='conv22_2_mbox_loc_reshape')(conv22_2_mbox_loc)
# Reshape the anchor box tensors, yielding 3D tensors of shape `(batch, height * width * n_boxes, 8)`
conv13_mbox_priorbox_reshape = Reshape((-1, 8), name='conv13_mbox_priorbox_reshape')(conv13_mbox_priorbox)
conv18_mbox_priorbox_reshape = Reshape((-1, 8), name='conv18_mbox_priorbox_reshape')(conv18_mbox_priorbox)
conv19_2_mbox_priorbox_reshape = Reshape((-1, 8), name='conv19_2_mbox_priorbox_reshape')(conv19_2_mbox_priorbox)
conv20_2_mbox_priorbox_reshape = Reshape((-1, 8), name='conv20_2_mbox_priorbox_reshape')(conv20_2_mbox_priorbox)
conv21_2_mbox_priorbox_reshape = Reshape((-1, 8), name='conv21_2_mbox_priorbox_reshape')(conv21_2_mbox_priorbox)
conv22_2_mbox_priorbox_reshape = Reshape((-1, 8), name='conv22_2_mbox_priorbox_reshape')(conv22_2_mbox_priorbox)
### Concatenate the predictions from the different layers
# Axis 0 (batch) and axis 2 (n_classes or 4, respectively) are identical for all layer predictions,
# so we want to concatenate along axis 1, the number of boxes per layer
# Output shape of `mbox_conf`: (batch, n_boxes_total, n_classes)
mbox_conf = Concatenate(axis=1, name='mbox_conf')([conv13_mbox_conf_reshape,
conv18_mbox_conf_reshape,
conv19_2_mbox_conf_reshape,
conv20_2_mbox_conf_reshape,
conv21_2_mbox_conf_reshape,
conv22_2_mbox_conf_reshape])
# Output shape of `mbox_loc`: (batch, n_boxes_total, 4)
mbox_loc = Concatenate(axis=1, name='mbox_loc')([conv13_mbox_loc_reshape,
conv18_mbox_loc_reshape,
conv19_2_mbox_loc_reshape,
conv20_2_mbox_loc_reshape,
conv21_2_mbox_loc_reshape,
conv22_2_mbox_loc_reshape])
# Output shape of `mbox_priorbox`: (batch, n_boxes_total, 8)
mbox_priorbox = Concatenate(axis=1, name='mbox_priorbox')([conv13_mbox_priorbox_reshape,
conv18_mbox_priorbox_reshape,
conv19_2_mbox_priorbox_reshape,
conv20_2_mbox_priorbox_reshape,
conv21_2_mbox_priorbox_reshape,
conv22_2_mbox_priorbox_reshape])
# The box coordinate predictions will go into the loss function just the way they are,
# but for the class predictions, we'll apply a softmax activation layer first
mbox_conf_softmax = Activation('softmax', name='mbox_conf_softmax')(mbox_conf)
# Concatenate the class and box predictions and the anchors to one large predictions vector
# Output shape of `predictions`: (batch, n_boxes_total, n_classes + 4 + 8)
predictions = Concatenate(axis=2, name='predictions')([mbox_conf_softmax, mbox_loc, mbox_priorbox])
if mode == 'training':
model = Model(inputs=x, outputs=predictions)
elif mode == 'inference':
decoded_predictions = DecodeDetections(confidence_thresh=confidence_thresh,
iou_threshold=iou_threshold,
top_k=top_k,
nms_max_output_size=nms_max_output_size,
coords=coords,
# normalize_coords=normalize_coords, #change this parameter for inference
normalize_coords=False,
img_height=img_height,
img_width=img_width,
name='decoded_predictions')(predictions)
model = Model(inputs=x, outputs=decoded_predictions)
elif mode == 'inference_fast':
decoded_predictions = DecodeDetectionsFast(confidence_thresh=confidence_thresh,
iou_threshold=iou_threshold,
top_k=top_k,
nms_max_output_size=nms_max_output_size,
coords=coords,
normalize_coords=normalize_coords,
img_height=img_height,
img_width=img_width,
name='decoded_predictions')(predictions)
model = Model(inputs=x, outputs=decoded_predictions)
else:
raise ValueError("`mode` must be one of 'training', 'inference' or 'inference_fast', but received '{}'.".format(mode))
return model
|
py | 1a3bfd279323d9ac15ad32e9b227e53cad158683 | from vit.formatter.scheduled import Scheduled
class ScheduledRelative(Scheduled):
def format_datetime(self, scheduled, task):
return self.relative(scheduled)
|
py | 1a3bfd3956dbcd55eb0d8c3b5bb5b773f2521a0f | #! /usr/bin/env python
"""
Copyright 2015-2018 Jacob M. Graving <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cv2
import numpy as np
import glob
class ImageReader:
'''Read images in batches.
Parameters
----------
path: str
Glob path to the images.
batch_size: int, default = 1
Batch size for reading frames
framerate: float, default = None
Video framerate for determining timestamps
for each frame. If None, timestamps will
equal frame number.
gray: bool, default = False
If gray, return only the middle channel
'''
def __init__(self, path, batch_size=1, framerate=None, gray=False):
#if isinstance(path, str):
# if os.path.exists(path):
# self.path = path
# else:
# raise ValueError('file or path does not exist')
#else:
# raise TypeError('path must be str')
self.path = path
self.image_paths = glob.glob(path)
self.batch_size = batch_size
self.n_frames = len(self.image_paths)
if framerate:
self.timestep = 1. / framerate
else:
self.timestep = 1.
test_images = cv2.imread(self.image_paths[0])
self.height = test_images.shape[0]
self.width = test_images.shape[1]
self.shape = (self.height, self.width)
self.gray = gray
self.idx = 0
def read(self, idx):
''' Read one frame
Returns
-------
frame: array
Image is returned of the frame if a frame exists.
Otherwise, return None.
'''
frame = cv2.imread(self.image_paths[idx])
if self.gray:
frame = frame[..., 1][..., None]
return idx, frame
def read_batch(self, idx0, idx1):
''' Read in a batch of frames.
Returns
-------
frames_idx: array
A batch of frames from the video.
frames: array
A batch of frames from the video.
'''
frames = []
frames_idx = []
for idx in range(idx0, idx1):
frame = self.read(idx)
frame_idx, frame = frame
frames.append(frame)
frames_idx.append(frame_idx)
if len(frames) == 1:
frames = frames[0][None,]
frames_idx = np.array(frames_idx)
timestamps = frames_idx * self.timestep
elif len(frames) > 1:
frames = np.stack(frames)
frames_idx = np.array(frames_idx)
timestamps = frames_idx * self.timestep
return frames, frames_idx, timestamps
def __len__(self):
return int(np.ceil(self.n_frames / float(self.batch_size)))
def __getitem__(self, index):
if isinstance(index, (int, np.integer)):
idx0 = index * self.batch_size
idx1 = (index + 1) * self.batch_size
else:
raise NotImplementedError
return self.read_batch(idx0, idx1)
def __next__(self):
if self.idx < len(self):
output = self.__getitem__(self.idx)
self.idx += 1
return output
else:
self.idx = 0
            raise StopIteration
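# Example usage (illustrative; the glob path and framerate are placeholders):
#     reader = ImageReader('frames/*.png', batch_size=8, framerate=30.0, gray=True)
#     frames, frame_idx, timestamps = reader[0]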
|
py | 1a3bfd458001b33a58ab72d36f012fe0948bc541 | from typing import Callable, Iterable

import click
def for_each(
fn: Callable,
iterable: Iterable) -> None:
"""
Apply a function with side effects to all elements of a collection.
:param fn: the function to be applied
:param iterable: collection of elements
:return: None
"""
for item in iterable:
fn(item)
def log_start(
category: str,
msg: str) -> None:
click.echo(f"[{category.upper()}] > {msg}...")
def log_end() -> None:
click.echo()
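# Example usage (illustrative):
#     log_start("io", "Reading input files")
#     for_each(print, ["a.txt", "b.txt"])
#     log_end()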
|
py | 1a3bfdab79d35b0b670ba66f1b974839179332ca | """
Web Map Tile Service time dimension demonstration
-------------------------------------------------
This example further demonstrates WMTS support within cartopy. Optional
keyword arguments can be supplied to the OGC WMTS 'gettile' method. This
allows for the specification of the 'time' dimension for a WMTS layer
which supports it.
The example shows satellite imagery retrieved from NASA's Global Imagery
Browse Services for 5th Feb 2016. A true color MODIS image is shown on
the left, with the MODIS false color 'snow RGB' shown on the right.
"""
__tags__ = ['Web services']
import matplotlib.pyplot as plt
import matplotlib.patheffects as PathEffects
from owslib.wmts import WebMapTileService
import cartopy.crs as ccrs
def main():
# URL of NASA GIBS
URL = 'http://gibs.earthdata.nasa.gov/wmts/epsg4326/best/wmts.cgi'
wmts = WebMapTileService(URL)
# Layers for MODIS true color and snow RGB
layers = ['MODIS_Terra_SurfaceReflectance_Bands143',
'MODIS_Terra_CorrectedReflectance_Bands367']
date_str = '2016-02-05'
# Plot setup
plot_CRS = ccrs.Mercator()
geodetic_CRS = ccrs.Geodetic()
x0, y0 = plot_CRS.transform_point(4.6, 43.1, geodetic_CRS)
x1, y1 = plot_CRS.transform_point(11.0, 47.4, geodetic_CRS)
ysize = 8
xsize = 2 * ysize * (x1 - x0) / (y1 - y0)
fig = plt.figure(figsize=(xsize, ysize), dpi=100)
for layer, offset in zip(layers, [0, 0.5]):
ax = fig.add_axes([offset, 0, 0.5, 1], projection=plot_CRS)
ax.set_xlim((x0, x1))
ax.set_ylim((y0, y1))
ax.add_wmts(wmts, layer, wmts_kwargs={'time': date_str})
txt = ax.text(4.7, 43.2, wmts[layer].title, fontsize=18, color='wheat',
transform=geodetic_CRS)
txt.set_path_effects([PathEffects.withStroke(linewidth=5,
foreground='black')])
plt.show()
if __name__ == '__main__':
main()
|
py | 1a3bfdd2036992da62a9adbc6fac8c7f3de4c10d | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Pedro Algarvio ([email protected])`
tests.integration.shell.master
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
# Import python libs
from __future__ import absolute_import
import os
import signal
import shutil
# Import 3rd-party libs
import yaml
# Import salt libs
import salt.utils
# Import salt test libs
import tests.integration.utils
from tests.support.case import ShellCase
from tests.support.paths import TMP
from tests.support.mixins import ShellCaseCommonTestsMixin
from tests.integration.utils import testprogram
class MasterTest(ShellCase, testprogram.TestProgramCase, ShellCaseCommonTestsMixin):
_call_binary_ = 'salt-master'
def test_issue_7754(self):
old_cwd = os.getcwd()
config_dir = os.path.join(TMP, 'issue-7754')
if not os.path.isdir(config_dir):
os.makedirs(config_dir)
os.chdir(config_dir)
config_file_name = 'master'
pid_path = os.path.join(config_dir, '{0}.pid'.format(config_file_name))
with salt.utils.fopen(self.get_config_file_path(config_file_name), 'r') as fhr:
config = yaml.load(fhr.read())
config['root_dir'] = config_dir
config['log_file'] = 'file:///tmp/log/LOG_LOCAL3'
config['ret_port'] = config['ret_port'] + 10
config['publish_port'] = config['publish_port'] + 10
with salt.utils.fopen(os.path.join(config_dir, config_file_name), 'w') as fhw:
fhw.write(
yaml.dump(config, default_flow_style=False)
)
ret = self.run_script(
self._call_binary_,
'--config-dir {0} --pid-file {1} -l debug'.format(
config_dir,
pid_path
),
timeout=5,
catch_stderr=True,
with_retcode=True
)
# Now kill it if still running
if os.path.exists(pid_path):
with salt.utils.fopen(pid_path) as fhr:
try:
os.kill(int(fhr.read()), signal.SIGKILL)
except OSError:
pass
try:
self.assertFalse(os.path.isdir(os.path.join(config_dir, 'file:')))
finally:
self.chdir(old_cwd)
if os.path.isdir(config_dir):
shutil.rmtree(config_dir)
def test_exit_status_unknown_user(self):
'''
Ensure correct exit status when the master is configured to run as an unknown user.
'''
master = testprogram.TestDaemonSaltMaster(
name='unknown_user',
configs={'master': {'map': {'user': 'some_unknown_user_xyz'}}},
parent_dir=self._test_dir,
)
# Call setup here to ensure config and script exist
master.setup()
stdout, stderr, status = master.run(
args=['-d'],
catch_stderr=True,
with_retcode=True,
)
try:
self.assert_exit_status(
status, 'EX_NOUSER',
message='unknown user not on system',
stdout=stdout,
stderr=tests.integration.utils.decode_byte_list(stderr)
)
finally:
# Although the start-up should fail, call shutdown() to set the
# internal _shutdown flag and avoid the registered atexit calls to
            # cause timeout exceptions and respective traceback
master.shutdown()
# pylint: disable=invalid-name
def test_exit_status_unknown_argument(self):
'''
Ensure correct exit status when an unknown argument is passed to salt-master.
'''
master = testprogram.TestDaemonSaltMaster(
name='unknown_argument',
parent_dir=self._test_dir,
)
# Call setup here to ensure config and script exist
master.setup()
stdout, stderr, status = master.run(
args=['-d', '--unknown-argument'],
catch_stderr=True,
with_retcode=True,
)
try:
self.assert_exit_status(
status, 'EX_USAGE',
message='unknown argument',
stdout=stdout,
stderr=tests.integration.utils.decode_byte_list(stderr)
)
finally:
# Although the start-up should fail, call shutdown() to set the
# internal _shutdown flag and avoid the registered atexit calls to
            # cause timeout exceptions and respective traceback
master.shutdown()
def test_exit_status_correct_usage(self):
'''
Ensure correct exit status when salt-master starts correctly.
'''
master = testprogram.TestDaemonSaltMaster(
name='correct_usage',
parent_dir=self._test_dir,
)
# Call setup here to ensure config and script exist
master.setup()
stdout, stderr, status = master.run(
args=['-d'],
catch_stderr=True,
with_retcode=True,
)
try:
self.assert_exit_status(
status, 'EX_OK',
message='correct usage',
stdout=stdout,
stderr=tests.integration.utils.decode_byte_list(stderr)
)
finally:
master.shutdown(wait_for_orphans=3)
# Do the test again to check does master shut down correctly
# **Due to some underlying subprocessing issues with Minion._thread_return, this
# part of the test has been commented out. Once these underlying issues have
# been addressed, this part of the test should be uncommented. Work for this
# issue is being tracked in https://github.com/saltstack/salt-jenkins/issues/378
# stdout, stderr, status = master.run(
# args=['-d'],
# catch_stderr=True,
# with_retcode=True,
# )
# try:
# self.assert_exit_status(
# status, 'EX_OK',
# message='correct usage',
# stdout=stdout,
# stderr=tests.integration.utils.decode_byte_list(stderr)
# )
# finally:
# master.shutdown(wait_for_orphans=3)
|
py | 1a3c00806938db996238a7d14f7bb41152a42379 | """Create a camera asset."""
import bpy
from openpype.pipeline import legacy_io
from openpype.hosts.blender.api import plugin, lib, ops
from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES
class CreateCamera(plugin.Creator):
"""Polygonal static geometry"""
name = "cameraMain"
label = "Camera"
family = "camera"
icon = "video-camera"
def process(self):
""" Run the creator on Blender main thread"""
mti = ops.MainThreadItem(self._process)
ops.execute_in_main_thread(mti)
def _process(self):
# Get Instance Container or create it if it does not exist
instances = bpy.data.collections.get(AVALON_INSTANCES)
if not instances:
instances = bpy.data.collections.new(name=AVALON_INSTANCES)
bpy.context.scene.collection.children.link(instances)
# Create instance object
asset = self.data["asset"]
subset = self.data["subset"]
name = plugin.asset_name(asset, subset)
camera = bpy.data.cameras.new(subset)
camera_obj = bpy.data.objects.new(subset, camera)
instances.objects.link(camera_obj)
asset_group = bpy.data.objects.new(name=name, object_data=None)
asset_group.empty_display_type = 'SINGLE_ARROW'
instances.objects.link(asset_group)
self.data['task'] = legacy_io.Session.get('AVALON_TASK')
print(f"self.data: {self.data}")
lib.imprint(asset_group, self.data)
if (self.options or {}).get("useSelection"):
bpy.context.view_layer.objects.active = asset_group
selected = lib.get_selection()
for obj in selected:
obj.select_set(True)
selected.append(asset_group)
bpy.ops.object.parent_set(keep_transform=True)
else:
plugin.deselect_all()
camera_obj.select_set(True)
asset_group.select_set(True)
bpy.context.view_layer.objects.active = asset_group
bpy.ops.object.parent_set(keep_transform=True)
return asset_group
|
py | 1a3c02262313e410c4aaf618d6af35959e53107b | import os
import csv
import httpagentparser
import random
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "LaasFrontEnd.settings")
from django.contrib.auth.models import User
from FrontEnd.models import *
def ImportBrowserStrings():
print "Importing Browser Data..."
with open('DBSetup/browser_strings.csv', 'r') as f:
browserReader = csv.reader(f)
browsers = []
print "Parsing Csv..."
for row in browserReader:
browsers.append(
(httpagentparser.parse(row[2]),
row[2])
)
#now create 100 link clicks on twitter
link = RedirectLink.objects.get(pk=1)
print "Creating clicks..."
stats = []
for i in range(100):
stats.append(LinkStat(Link=link,
IpAddress='192.186.1.1',
CountryCode='USA',
Country='United States',
Referer='t.co'))
LinkStat.objects.bulk_create(stats)
agents = []
for stat in stats:
browser = random.choice(browsers)
        # NOTE: AgentType is assumed here to be the parsed browser name from
        # httpagentparser; adjust if the LinkAgentType model expects otherwise.
        agents.append(LinkAgentType(
            Stat=stat,
            AgentType=browser[0].get('browser', {}).get('name', 'Unknown')))
    LinkAgentType.objects.bulk_create(agents)
|
py | 1a3c0304b00f4b266256a108f2df652d13f92390 | """The tests for the WUnderground platform."""
import unittest
import unittest.mock
from homeassistant.components.sensor import wunderground
from homeassistant.const import TEMP_CELSIUS, LENGTH_INCHES
from tests.common import get_test_home_assistant
VALID_CONFIG_PWS = {
'platform': 'wunderground',
'api_key': 'foo',
'pws_id': 'bar',
'monitored_conditions': [
'weather', 'feelslike_c', 'alerts', 'elevation', 'location'
]
}
VALID_CONFIG = {
'platform': 'wunderground',
'api_key': 'foo',
'monitored_conditions': [
'weather', 'feelslike_c', 'alerts', 'elevation', 'location',
'weather_1d_metric', 'precip_1d_in'
]
}
INVALID_CONFIG = {
'platform': 'wunderground',
'api_key': 'BOB',
'pws_id': 'bar',
'lang': 'foo',
'monitored_conditions': [
'weather', 'feelslike_c', 'alerts'
]
}
FEELS_LIKE = '40'
WEATHER = 'Clear'
HTTPS_ICON_URL = 'https://icons.wxug.com/i/c/k/clear.gif'
ALERT_MESSAGE = 'This is a test alert message'
FORECAST_TEXT = 'Mostly Cloudy. Fog overnight.'
PRECIP_IN = 0.03
def mocked_requests_get(*args, **kwargs):
"""Mock requests.get invocations."""
class MockResponse:
"""Class to represent a mocked response."""
def __init__(self, json_data, status_code):
"""Initialize the mock response class."""
self.json_data = json_data
self.status_code = status_code
def json(self):
"""Return the json of the response."""
return self.json_data
if str(args[0]).startswith('http://api.wunderground.com/api/foo/'):
return MockResponse({
"response": {
"version": "0.1",
"termsofService":
"http://www.wunderground.com/weather/api/d/terms.html",
"features": {
"conditions": 1,
"alerts": 1,
"forecast": 1,
}
}, "current_observation": {
"image": {
"url":
'http://icons.wxug.com/graphics/wu2/logo_130x80.png',
"title": "Weather Underground",
"link": "http://www.wunderground.com"
},
"feelslike_c": FEELS_LIKE,
"weather": WEATHER,
"icon_url": 'http://icons.wxug.com/i/c/k/clear.gif',
"display_location": {
"city": "Holly Springs",
"country": "US",
"full": "Holly Springs, NC"
},
"observation_location": {
"elevation": "413 ft",
"full": "Twin Lake, Holly Springs, North Carolina"
},
}, "alerts": [
{
"type": 'FLO',
"description": "Areal Flood Warning",
"date": "9:36 PM CDT on September 22, 2016",
"expires": "10:00 AM CDT on September 23, 2016",
"message": ALERT_MESSAGE,
},
], "forecast": {
"txt_forecast": {
"date": "22:35 CEST",
"forecastday": [
{
"period": 0,
"icon_url":
"http://icons.wxug.com/i/c/k/clear.gif",
"title": "Tuesday",
"fcttext": FORECAST_TEXT,
"fcttext_metric": FORECAST_TEXT,
"pop": "0"
},
],
}, "simpleforecast": {
"forecastday": [
{
"date": {
"pretty": "19:00 CEST 4. Duben 2017",
},
"period": 1,
"high": {
"fahrenheit": "56",
"celsius": "13",
},
"low": {
"fahrenheit": "43",
"celsius": "6",
},
"conditions": "Možnost deště",
"icon_url":
"http://icons.wxug.com/i/c/k/chancerain.gif",
"qpf_allday": {
"in": PRECIP_IN,
"mm": 1,
},
"maxwind": {
"mph": 0,
"kph": 0,
"dir": "",
"degrees": 0,
},
"avewind": {
"mph": 0,
"kph": 0,
"dir": "severní",
"degrees": 0
}
},
],
},
},
}, 200)
else:
return MockResponse({
"response": {
"version": "0.1",
"termsofService":
"http://www.wunderground.com/weather/api/d/terms.html",
"features": {},
"error": {
"type": "keynotfound",
"description": "this key does not exist"
}
}
}, 200)
class TestWundergroundSetup(unittest.TestCase):
"""Test the WUnderground platform."""
# pylint: disable=invalid-name
DEVICES = []
def add_devices(self, devices):
"""Mock add devices."""
for device in devices:
self.DEVICES.append(device)
def setUp(self):
"""Initialize values for this testcase class."""
self.DEVICES = []
self.hass = get_test_home_assistant()
self.key = 'foo'
self.config = VALID_CONFIG_PWS
self.lat = 37.8267
self.lon = -122.423
self.hass.config.latitude = self.lat
self.hass.config.longitude = self.lon
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
@unittest.mock.patch('requests.get', side_effect=mocked_requests_get)
def test_setup(self, req_mock):
"""Test that the component is loaded if passed in PWS Id."""
self.assertTrue(
wunderground.setup_platform(self.hass, VALID_CONFIG_PWS,
self.add_devices, None))
self.assertTrue(
wunderground.setup_platform(self.hass, VALID_CONFIG,
self.add_devices, None))
self.assertTrue(
wunderground.setup_platform(self.hass, INVALID_CONFIG,
self.add_devices, None))
@unittest.mock.patch('requests.get', side_effect=mocked_requests_get)
def test_sensor(self, req_mock):
"""Test the WUnderground sensor class and methods."""
wunderground.setup_platform(self.hass, VALID_CONFIG, self.add_devices,
None)
for device in self.DEVICES:
device.update()
self.assertTrue(str(device.name).startswith('PWS_'))
if device.name == 'PWS_weather':
self.assertEqual(HTTPS_ICON_URL, device.entity_picture)
self.assertEqual(WEATHER, device.state)
self.assertIsNone(device.unit_of_measurement)
elif device.name == 'PWS_alerts':
self.assertEqual(1, device.state)
self.assertEqual(ALERT_MESSAGE,
device.device_state_attributes['Message'])
self.assertIsNone(device.entity_picture)
elif device.name == 'PWS_location':
self.assertEqual('Holly Springs, NC', device.state)
elif device.name == 'PWS_elevation':
self.assertEqual('413', device.state)
elif device.name == 'PWS_feelslike_c':
self.assertIsNone(device.entity_picture)
self.assertEqual(FEELS_LIKE, device.state)
self.assertEqual(TEMP_CELSIUS, device.unit_of_measurement)
elif device.name == 'PWS_weather_1d_metric':
self.assertEqual(FORECAST_TEXT, device.state)
else:
self.assertEqual(device.name, 'PWS_precip_1d_in')
self.assertEqual(PRECIP_IN, device.state)
self.assertEqual(LENGTH_INCHES, device.unit_of_measurement)
|
py | 1a3c0351a1cd5696d8ab27991f8941481f635607 | # -*- coding: utf-8 -*-
import torch as th
import torch.nn as nn
from leibniz.nn.layer.simam import SimAM
from leibniz.nn.layer.cbam import CBAM
class BasicBlock(nn.Module):
def __init__(self, in_channel, out_channel, step, relu, conv, reduction=16):
super(BasicBlock, self).__init__()
self.step = step
self.relu = relu
self.conv1 = conv(in_channel, in_channel, kernel_size=3, stride=1, padding=1)
self.conv2 = conv(in_channel, out_channel, kernel_size=3, stride=1, padding=1)
self.simam = SimAM(out_channel, reduction)
def forward(self, x):
y = self.conv1(x)
y = self.relu(y)
y = self.conv2(y)
y = self.simam(y)
return y
class Bottleneck(nn.Module):
def __init__(self, in_channel, out_channel, step, relu, conv, reduction=16):
super(Bottleneck, self).__init__()
self.step = step
self.relu = relu
hidden = min(in_channel, out_channel) // 4 + 1
self.conv1 = conv(in_channel, hidden, kernel_size=1, bias=False)
self.conv2 = conv(hidden, hidden, kernel_size=3, bias=False, padding=1)
self.conv3 = conv(hidden, out_channel, kernel_size=1, bias=False)
self.simam = SimAM(out_channel, reduction)
def forward(self, x):
y = self.conv1(x)
y = self.relu(y)
y = self.conv2(y)
y = self.relu(y)
y = self.conv3(y)
y = self.simam(y)
return y
class HyperBasic(nn.Module):
extension = 1
least_required_dim = 1
def __init__(self, dim, step, ix, tx, relu, conv, reduction=16):
super(HyperBasic, self).__init__()
self.dim = dim
self.step = step
self.ix = ix
self.tx = tx
self.input = BasicBlock(dim, 2 * dim, step, relu, conv, reduction=reduction)
self.output = BasicBlock(7 * dim, dim, step, relu, conv, reduction=reduction)
def forward(self, x):
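        # self.input predicts, per channel, a velocity magnitude (velo) and an
        # angle (theta); u and v are its Cartesian components, and y1..y4 are
        # four 90-degree-rotated Euler-style updates of x fed to self.output.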
r = self.input(x)
velo = r[:, :self.dim]
theta = r[:, self.dim:]
u = velo * th.cos(theta)
v = velo * th.sin(theta)
y1 = x * (1 + v * self.step) + u * self.step
y2 = x * (1 + u * self.step) - v * self.step
y3 = x * (1 - v * self.step) - u * self.step
y4 = x * (1 - u * self.step) + v * self.step
ys = th.cat([y1, y2, y3, y4, x, velo, theta], dim=1)
return x * (1 + self.output(ys) * self.step)
class HyperBottleneck(nn.Module):
extension = 2
least_required_dim = 1
def __init__(self, dim, step, ix, tx, relu, conv, reduction=16):
super(HyperBottleneck, self).__init__()
self.dim = dim
self.step = step
self.ix = ix
self.tx = tx
self.input = Bottleneck(dim, 2 * dim, step, relu, conv, reduction=reduction)
self.output = Bottleneck(7 * dim, dim, step, relu, conv, reduction=reduction)
def forward(self, x):
r = self.input(x)
velo = r[:, :self.dim]
theta = r[:, self.dim:]
u = velo * th.cos(theta)
v = velo * th.sin(theta)
y1 = x * (1 + v * self.step) + u * self.step
y2 = x * (1 + u * self.step) - v * self.step
y3 = x * (1 - v * self.step) - u * self.step
y4 = x * (1 - u * self.step) + v * self.step
ys = th.cat([y1, y2, y3, y4, x, velo, theta], dim=1)
return x * (1 + self.output(ys) * self.step)
|
py | 1a3c05250c69f815101d247e7d15eceb0e4262d6 | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import division
from six.moves.urllib import request
import glob
import os
import platform # Mac or Linux special for uncompress command
import errno
import sys
import numpy as np
import codecs
import re
import subprocess
import sys
import tarfile
import matplotlib.pyplot as plt
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
class xerion(object):
"""Managing xerion datafiles.
Read datafiles ending with '-nsyl.ex' and '-syl.ex' from `xerion_prefix/datadir`, and
SAve them to `pkl_dir` as pickle files.
Usage:
```python
print(xerion().Orthography) # for input data format
print(xerion().Phonology) # for output data format
X = xerion().input
y = xerion().output
```
The original datafiles can be obtained from http://www.cnbc.cmu.edu/~plaut/xerion/
"""
def __init__(self,
data='SM-nsyl',
datadir='./data/',
pkl_dir='./data/',
remake=False, readall=False, saveall=False,
forceDownload=False):
self.module_path = os.path.dirname(__file__)
self.xerion_prefix = 'nets/share/'
self.datadir = datadir # + self.xerion_prefix
self.pkl_dir = pkl_dir
self.url_base = 'http://www.cnbc.cmu.edu/~plaut/xerion/'
self.url_file = 'xerion-3.1-nets-share.tar.gz'
self.origfile_size = 1026691
self.syl_files = ['SM-syl.ex', 'besnerNW-syl.ex', 'bodies-syl.ex', 'bodiesNW-syl.ex',
'friedmanNW-syl.ex', 'glushkoNW-syl.ex', 'graphemes-syl.ex',
'jared1-syl.ex', 'jared2-syl.ex', 'megaNW-syl.ex',
'pureNW-syl.ex', 'surface-syl.ex', 'taraban-syl.ex',
'tarabanALL-syl.ex', 'tarabanEvN-syl.ex', 'tarabanNRE-syl.ex',
'vcoltheartNW-syl.ex']
self.nsyl_files = ['SM-nsyl.ex', 'besnerNW-nsyl.ex', 'glushkoNW-nsyl.ex',
'graphemes-nsyl.ex', 'jared1-nsyl.ex', 'jared2-nsyl.ex',
'markPH-nsyl.ex', 'megaNW-nsyl.ex', 'surface-nsyl.ex',
'taraban-nsyl.ex', 'tarabanALL-nsyl.ex', 'tarabanEvN-nsyl.ex',
'tarabanNRE-nsyl.ex']
self.datafilenames = [ *self.nsyl_files, *self.syl_files]
self._tags = ('#', 'seq', 'grapheme', 'phoneme', 'freq', 'tag', 'input', 'output')
self.Orthography={'onset':['Y', 'S', 'P', 'T', 'K', 'Q', 'C', 'B', 'D', 'G',
'F', 'V', 'J', 'Z', 'L', 'M', 'N', 'R', 'W', 'H',
'CH', 'GH', 'GN', 'PH', 'PS', 'RH', 'SH', 'TH', 'TS', 'WH'],
'vowel':['E', 'I', 'O', 'U', 'A', 'Y', 'AI', 'AU', 'AW', 'AY',
'EA', 'EE', 'EI', 'EU', 'EW', 'EY', 'IE', 'OA', 'OE', 'OI',
'OO', 'OU', 'OW', 'OY', 'UE', 'UI', 'UY'],
'coda':['H', 'R', 'L', 'M', 'N', 'B', 'D', 'G', 'C', 'X',
'F', 'V', '∫', 'S', 'Z', 'P', 'T', 'K', 'Q', 'BB',
'CH', 'CK', 'DD', 'DG', 'FF', 'GG', 'GH', 'GN', 'KS', 'LL',
'NG', 'NN', 'PH', 'PP', 'PS', 'RR', 'SH', 'SL', 'SS', 'TCH',
'TH', 'TS', 'TT', 'ZZ', 'U', 'E', 'ES', 'ED']}
self.Phonology={'onset':['s', 'S', 'C', 'z', 'Z', 'j', 'f', 'v', 'T', 'D',
'p', 'b', 't', 'd', 'k', 'g', 'm', 'n', 'h', 'I',
'r', 'w', 'y'],
'vowel': ['a', 'e', 'i', 'o', 'u', '@', '^', 'A', 'E', 'I',
'O', 'U', 'W', 'Y'],
'coda':['r', 'I', 'm', 'n', 'N', 'b', 'g', 'd', 'ps', 'ks',
'ts', 's', 'z', 'f', 'v', 'p', 'k', 't', 'S', 'Z',
'T', 'D', 'C', 'j']}
self.bibtex=[
'@article{SM89,',
'title={A Distributed, Developmental Model of Word Recognition and Naming},',
'auhor={Mark S. Seidenberg and James L. McClelland},',
'year={1989},',
'journal={psychological review},',
'volume={96},',
'number={4},',
'pages={523-568}}'
'}',
'@article{PMSP96,',
'title={Understanding Normal and Impaired Word Reading:',
' Computational Principles in Quasi-Regular Domains},',
'author={David C. Plaut and James L. McClelland and Mark S. Seidenberg and Karalyn Patterson},',
'year={1996},',
'volume={103},',
'number={1},',
'pages={56-115},',
'journal={psychological review}',
'}']
self.dbs = {}
if remake:
self.dbs = self.make_all()
saveall = True
if saveall == True:
self.save_all()
readall = True
if readall:
self.dbs = self.read_all()
self.dataname = data
pkl_file = self.pkl_dir + self.dataname + '.pkl'
self.db = self.load_pickle(filename=pkl_file)
self.input = self.db[self._tags.index('input')]
self.output = self.db[self._tags.index('output')]
self.freq = self.db[self._tags.index('freq')]
self.graph = self.db[self._tags.index('grapheme')]
self.phone = self.db[self._tags.index('phoneme')]
self.tag = self.db[self._tags.index('tag')]
self.dbs[self.dataname] = self.db
#def read_a_xerion_file(filename='SM-nsyl.pkl'):
# pass
def read_all(self):
"""reading data files named ening with '-nsyl.ex'."""
dbs = {}
for dname in self.datafilenames:
dname_ = re.sub('.ex', '', dname)
filename = self.pkl_dir + dname_ + '.pkl'
if not os.path.isfile(filename):
                raise ValueError('{0} could not be found'.format(filename))
dbs[dname_] = self.load_pickle(filename=filename)
return dbs
def save_all(self):
"""saving data files to be pickled."""
dirname = self.pkl_dir
if not os.path.exists(self.pkl_dir):
os.makedirs(self.pkl_dir)
if not os.path.exists(self.pkl_dir):
raise OSError('{} was not found'.format(self.pkl_dir))
for db in self.dbs:
dest_filename = self.pkl_dir + re.sub('.ex', '.pkl', db)
try:
with codecs.open(dest_filename, 'wb') as f:
pickle.dump(self.dbs[db], f)
except:
print('Error in processing {0}'.format(dest_filename))
def load_pickle(self, filename='SM-nsyl.pk'):
if not os.path.isfile(filename):
raise ValueError('Could not find {}'.format(filename))
with open(filename, 'rb') as f:
db = pickle.load(f)
return db
def make_all(self):
dbs = {}
for dname in self.datafilenames:
filename = self.datadir + self.xerion_prefix + dname
if not os.path.isfile(filename):
                print('{0} could not be found'.format(filename))
downfilename, h = self.download()
#print('downloaded file: {0}, {1}'.format(downfilename, h))
self.extractall()
inp, out, graph, phone, freq, tags = self.read_xerion(filename=filename)
dbs[dname] = [dname, '#', graph, phone, freq, tags, inp, out]
return dbs
def read_xerion(self, filename='../data/nets/share/SM-nsyl.ex'):
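        # Parser convention for the xerion .ex format: a line starting with '#'
        # carries the example header (seq, grapheme, phoneme, freq, tag) and
        # switches to collecting input units; a ',' line switches to output
        # units and a ';' line switches back to input units.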
with codecs.open(filename,'r') as f:
lines = f.readlines()
inp_flag = False
inpbuff, outbuff, tags = {}, {}, {}
graph, phone, freq = {}, {}, {}
for i, line in enumerate(lines[1:]):
if len(line) == 0:
continue
a = line.strip().split(' ')
if line[0] == '#':
if a[0] == '#WARNING:':
continue
try:
seq = int(a[self._tags.index('seq')])
except:
continue
_graph = a[self._tags.index('grapheme')]
_phone = a[self._tags.index('phoneme')]
_freq = a[self._tags.index('freq')]
_tag = a[self._tags.index('tag')]
inp_flag = True
if not seq in inpbuff:
inpbuff[seq] = list()
outbuff[seq] = list()
graph[seq] = _graph
phone[seq] = _phone
freq[seq] = _freq
tags[seq] = _tag
continue
elif line[0] == ',':
inp_flag = False
continue
elif line[0] == ';':
inp_flag = True
continue
if inp_flag:
#print('hoge seq=', seq)
for x in a:
try:
inpbuff[seq].append(int(x))
except:
pass #print(x, end=', ')
else:
for x in a:
try:
outbuff[seq].append(int(x))
except:
pass
continue
ret_in = np.array([inpbuff[seq] for seq in inpbuff], dtype=np.int16)
ret_out = np.array([outbuff[seq] for seq in outbuff], dtype=np.int16)
ret_graph = np.array([graph[seq] for seq in graph], dtype=np.unicode_)
ret_phone = np.array([phone[seq] for seq in phone], dtype=np.unicode_)
ret_freq = np.array([freq[seq] for seq in freq], dtype=np.float32)
ret_tag = np.array([tags[seq] for seq in tags], dtype=np.unicode_)
return ret_in, ret_out, ret_graph, ret_phone, ret_freq, ret_tag
    def download(self, forcedownload=False, destdir=None):
if destdir is None:
destdir = self.datadir
if not os.path.exists(destdir):
os.mkdir(destdir)
dest_filename = destdir + self.url_file
if os.path.exists(dest_filename):
statinfo = os.stat(dest_filename)
if statinfo.st_size != self.origfile_size:
                forcedownload = True
print("File {} not expected size, forcing download".format(dest_filename))
else:
print("File '{}' allready downloaded.".format(dest_filename))
if forcedownload == True or not os.path.exists(dest_filename):
print('Attempting to download: {}'.format(dest_filename))
print('From {}'.format(self.url_base + self.url_file))
fname, h = request.urlretrieve(self.url_base+self.url_file, dest_filename)
print("Downloaded '{}' successfully".format(dest_filename))
return fname, h
else:
return dest_filename, None
    def extractall(self, gzfile=None):
if gzfile is None:
gzfile, _ = self.download()
with tarfile.open(name=gzfile, mode='r:gz') as tar:
tar.extractall(path=self.datadir)
if platform.system() == 'Darwin':
cmd = '/usr/bin/uncompress'
args = self.datadir + self.xerion_prefix + '*.ex.Z'
files = glob.glob(args)
for file in sorted(files):
print(cmd, file)
try:
subprocess.Popen([cmd, file])
except:
print('cmd {0} {1} failed'.format(cmd, file))
sys.exit()
print('#extractall() completed. command:{}'.format(cmd))
else:
print('You must on Linux or Windows, Please uncompress manually')
sys.exit()
self.pkl_dir = self.datadir + self.xerion_prefix
def note(self):
print('\n\n# xerion() is the data management tool for PMSP96')
print('# The original data will be found at:',
self.url_base + self.url_file)
print('# The data format is as following:')
for l in [self.Orthography, self.Phonology]:
for x in l:
print(x, l[x])
print('\n# The bibtex format of the original papers:')
for l in self.bibtex:
print(l)
@staticmethod
def usage():
print('```python')
print('import numpy')
print('import wbai_aphasia as handson')
print()
print('from sklearn.neural_network import MLPRegressor')
print()
print('data = handson.xerion()')
print('X = np.asarray(data.input, dtype=np.float32)')
print('y = np.asarray(data.output, dtype=np.float32)')
print()
print('model = MLPRegressor()')
print('model.fit(X,y)')
print('model.score(X,y)')
print('```')
def descr(self):
fdescr_name = os.path.join(self.module_path, 'descr', 'xerion.md')
print('self.module_path={}'.format(self.module_path))
print('fdescr_name={}'.format(fdescr_name))
with codecs.open(fdescr_name, 'r') as markdownfile:
fdescr = markdownfile.read()
print(fdescr)
|
py | 1a3c05792b8407f183ed1172aed072a77a2e8990 | import mock
import os
from moto import mock_s3
from rasa.nlu import persistor, train
from tests.nlu import utilities
class Object(object):
pass
# noinspection PyPep8Naming
@mock_s3
def test_list_method_method_in_AWSPersistor(component_builder, tmpdir):
# artificially create a persisted model
_config = utilities.base_test_conf("keyword")
os.environ["BUCKET_NAME"] = "rasa-test"
os.environ["AWS_DEFAULT_REGION"] = "us-west-1"
(trained, _, persisted_path) = train(
_config,
data="data/test/demo-rasa-small.json",
path=tmpdir.strpath,
storage="aws",
component_builder=component_builder,
)
# We need to create the bucket since this is all in Moto's 'virtual' AWS
# account
awspersistor = persistor.AWSPersistor(os.environ["BUCKET_NAME"])
result = awspersistor.list_models()
assert len(result) == 1
# noinspection PyPep8Naming
@mock_s3
def test_list_models_method_raise_exeception_in_AWSPersistor():
os.environ["AWS_DEFAULT_REGION"] = "us-east-1"
awspersistor = persistor.AWSPersistor("rasa-test")
result = awspersistor.list_models()
assert result == []
# noinspection PyPep8Naming
def test_list_models_method_in_GCSPersistor():
# noinspection PyUnusedLocal
def mocked_init(self, *args, **kwargs):
self._model_dir_and_model_from_filename = lambda x: {
"blob_name": ("project", "model_name")
}[x]
self.bucket = Object()
def mocked_list_blobs():
filter_result = Object()
filter_result.name = "blob_name"
return (filter_result,)
self.bucket.list_blobs = mocked_list_blobs
with mock.patch.object(persistor.GCSPersistor, "__init__", mocked_init):
result = persistor.GCSPersistor("").list_models()
assert result == ["model_name"]
# noinspection PyPep8Naming
def test_list_models_method_raise_exeception_in_GCSPersistor():
# noinspection PyUnusedLocal
def mocked_init(self, *args, **kwargs):
self._model_dir_and_model_from_filename = lambda x: {
"blob_name": ("project", "model_name")
}[x]
self.bucket = Object()
def mocked_list_blobs():
raise ValueError
self.bucket.list_blobs = mocked_list_blobs
with mock.patch.object(persistor.GCSPersistor, "__init__", mocked_init):
result = persistor.GCSPersistor("").list_models()
assert result == []
# noinspection PyPep8Naming
def test_list_models_method_in_AzurePersistor():
# noinspection PyUnusedLocal
def mocked_init(self, *args, **kwargs):
self._model_dir_and_model_from_filename = lambda x: {
"blob_name": ("project", "model_name")
}[x]
self.blob_client = Object()
self.container_name = "test"
# noinspection PyUnusedLocal
def mocked_list_blobs(container_name, prefix=None):
filter_result = Object()
filter_result.name = "blob_name"
return (filter_result,)
self.blob_client.list_blobs = mocked_list_blobs
with mock.patch.object(persistor.AzurePersistor, "__init__", mocked_init):
result = persistor.AzurePersistor("", "", "").list_models()
assert result == ["model_name"]
# noinspection PyPep8Naming
def test_list_models_method_raise_exeception_in_AzurePersistor():
def mocked_init(self, *args, **kwargs):
self._model_dir_and_model_from_filename = lambda x: {"blob_name": ("project",)}[
x
]
self.blob_client = Object()
# noinspection PyUnusedLocal
def mocked_list_blobs(container_name, prefix=None):
raise ValueError
self.blob_client.list_blobs = mocked_list_blobs
with mock.patch.object(persistor.AzurePersistor, "__init__", mocked_init):
result = persistor.AzurePersistor("", "", "").list_models()
assert result == []
|
py | 1a3c0640b711fb17a7c7c490dedc482ad74bf58a | # Generated by Django 2.2.11 on 2020-03-23 23:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("concordia", "0045_auto_20200323_1832"),
]
operations = [
migrations.AlterField(
model_name="asset",
name="transcription_status",
field=models.CharField(
choices=[
("not_started", "Not Started"),
("in_progress", "In Progress"),
("submitted", "Needs Review"),
("completed", "Completed"),
],
db_index=True,
default="not_started",
editable=False,
max_length=20,
),
),
]
|
py | 1a3c0763f33dbc90add8fc88eeb2eadc3111409e | # -*- coding: UTF-8 -*-
# Copyright (c) 2018, Xycart
# License: MIT License
from __future__ import unicode_literals
import sys, os # standard modules
from dxfGenerator import dxfGenerator
import ConvertPingYin
SCRIPTS_DIR = os.path.dirname(os.path.abspath(__file__))
ATD_DIR = os.path.dirname(SCRIPTS_DIR)
CADINFOS_DIR = ATD_DIR + '/CadInfos/Allegro17.0'
LIBRARYS_DIR = ATD_DIR + '/Library/Allegro'
BATCH_LATEST_CMD = \
"""call @WORKDIR@/CustomVectorTextMechanicalSymbol.bat
"""
def SaveFile(string, fname):
with open(fname, "w") as textFile:
textFile.write(string)
def CreateFile(string, fname, overwrite=True):
if overwrite:
SaveFile(string, fname)
else:
if not os.path.exists(fname):
SaveFile(string, fname)
def scrGenerator(dxffile, symbolname):
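    # Fill the Allegro .scr template: the symbol name (truncated to 32 chars)
    # and the DXF file path are substituted for the @...@ placeholders.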
scr_srcdir = CADINFOS_DIR + '/CustomVectorTextMechanicalSymbol.scr'
scr_dstdir = LIBRARYS_DIR + '/atd-' + symbolname + '/CustomVectorTextMechanicalSymbol.scr'
origfn = scr_srcdir
targfn = scr_dstdir
with open(origfn, 'r') as ifile, open(targfn, 'w') as ofile:
content = ifile.read().replace('@SYMBOL_NAME@', symbolname[:32]).replace('@IMPORT_DXFFILE@', dxffile)
ofile.write(content)
def batGenerator(scrname, symbolname):
scr_srcdir = CADINFOS_DIR + '/CustomVectorTextMechanicalSymbol.bat'
scr_dstdir = LIBRARYS_DIR + '/atd-' + symbolname + '/CustomVectorTextMechanicalSymbol.bat'
origfn = scr_srcdir
targfn = scr_dstdir
with open(origfn, 'r') as ifile, open(targfn, 'w') as ofile:
content = ifile.read().replace('@SYMBOL_NAME@', symbolname).replace('@SCR_NAME@', scrname)
ofile.write(content)
def draGenerator(dxffile, symbolname):
scrGenerator(dxffile, symbolname)
batGenerator('CustomVectorTextMechanicalSymbol', symbolname)
workdir = LIBRARYS_DIR + '/atd-' + symbolname
CreateFile(BATCH_LATEST_CMD.replace('@WORKDIR@', workdir),
LIBRARYS_DIR + '/RunLatestGenerator.bat',
overwrite=True)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
# Create Arguments
parser.add_argument('--text', '-t', dest='text',
help='The vector text you want')
args = parser.parse_args()
if args.text is None:
text = "博大精深"
else:
text = args.text.decode('gb2312')
#print sys.getdefaultencoding()
#print text
textpy = ConvertPingYin.CConvert().convert(text) # .replace('-','')
symbolname = textpy
dxf_dstdir = LIBRARYS_DIR + '/atd-' + symbolname
dxffn = dxf_dstdir + "/atd-" + symbolname + ".dxf"
expdxffn = dxffn.split('.')[0] + ".exp.dxf"
dxffile = expdxffn
dxferror = dxfGenerator(text)
if dxferror:
print ("#### Error on dxfGenerator(%s)" % text)
draGenerator(dxffile, symbolname)
|
py | 1a3c07b18274529717e8b193e9355728de768838 | from django.conf.urls import url
from django.core.urlresolvers import reverse
from wagtail.wagtailcore import hooks
from wagtail.wagtailadmin.menu import MenuItem
from baselinecore.theme.views import ActivateTheme, ThemeIndex, ThemeThumbnail, InstallTheme
@hooks.register('register_admin_menu_item')
def theme_menu():
return MenuItem('Themes', reverse('baseline-theme-index'), classnames='icon icon-edit',
order=10000)
@hooks.register('register_admin_urls')
def theme_urls():
return [
url(r'^themes/$', ThemeIndex.as_view(), name='baseline-theme-index'),
url(r'^themes/activate/$', ActivateTheme.as_view(), name='baseline-theme-activate'),
url(r'^themes/install/$', InstallTheme.as_view(), name='baseline-theme-install'),
url(r'^themes/thumb/$', ThemeThumbnail.as_view(), name='baseline-theme-thumb'),
]
|
py | 1a3c07c8059f5ee3fd377bee96d38ae03f875fbc | """GraphBLAS Scalar (SuiteSparse Only)
"""
from .base import (
lib,
ffi,
NULL,
_check,
)
from .types import _gb_from_type
__all__ = ["Scalar"]
class Scalar:
"""GraphBLAS Scalar
Used for now mostly for the `pygraphblas.Matrix.select`.
"""
__slots__ = ("_scalar", "type")
def __init__(self, s, typ):
self._scalar = s
self.type = typ
def __del__(self):
_check(lib.GxB_Scalar_free(self._scalar))
def __len__(self):
return self.nvals
def dup(self):
"""Create an duplicate Scalar from the given argument."""
new_sca = ffi.new("GxB_Scalar*")
_check(lib.GxB_Scalar_dup(new_sca, self._scalar[0]))
return self.__class__(new_sca, self.type)
@classmethod
def from_type(cls, typ):
"""Create an empty Scalar from the given type and size."""
new_sca = ffi.new("GxB_Scalar*")
_check(lib.GxB_Scalar_new(new_sca, typ._gb_type))
return cls(new_sca, typ)
@classmethod
def from_value(cls, value):
"""Create an empty Scalar from the given type and size."""
new_sca = ffi.new("GxB_Scalar*")
typ = _gb_from_type(type(value))
_check(lib.GxB_Scalar_new(new_sca, typ._gb_type))
s = cls(new_sca, typ)
s[0] = value
return s
@property
def gb_type(self):
"""Return the GraphBLAS low-level type object of the Scalar."""
return self.type._gb_type
def clear(self):
"""Clear the scalar."""
_check(lib.GxB_Scalar_clear(self._scalar[0]))
def __getitem__(self, index):
result = ffi.new(self.type._c_type + "*")
_check(
self.type._Scalar_extractElement(result, self._scalar[0]), raise_no_val=True
)
return result[0]
def __setitem__(self, index, value):
_check(
self.type._Scalar_setElement(
self._scalar[0], ffi.cast(self.type._c_type, value)
)
)
def wait(self):
_check(lib.GxB_Scalar_wait(self._scalar))
@property
def nvals(self):
"""Return the number of values in the scalar (0 or 1)."""
n = ffi.new("GrB_Index*")
_check(lib.GxB_Scalar_nvals(n, self._scalar[0]))
return n[0]
def __bool__(self):
return bool(self.nvals)
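# Example usage (illustrative):
#     s = Scalar.from_value(3.14)
#     print(len(s), s[0])   # -> 1 3.14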
|
py | 1a3c088774a25519088015828b837c1872eb6a03 | from pathlib import Path
from typing import List, Optional
from ipywidgets import HBox, SelectMultiple
from .core import JSONType
from .mixins import TextTrainerMixin, sample_from_iterable
from .widgets import Solver, GPUIndex, Engine
alpha = "abcdefghijklmnopqrstuvwxyz0123456789,;.!?:’\“/\_@#$%^&*~`+-=<>()[]{}"
class Text(TextTrainerMixin):
def __init__(
self,
sname: str,
*,
mllib: str = "caffe",
engine: Engine = "CUDNN_SINGLE_HANDLE",
training_repo: Path,
testing_repo: Optional[Path] = None,
description: str = "Text service",
model_repo: Path = None,
host: str = "localhost",
port: int = 1234,
path: str = "",
gpuid: GPUIndex = 0,
# -- specific
regression: bool = False,
db: bool = True,
nclasses: int = -1,
ignore_label: Optional[int] = -1,
layers: List[str] = [],
dropout: float = .2,
iterations: int = 25000,
test_interval: int = 1000,
snapshot_interval: int = 1000,
base_lr: float = 0.001,
lr_policy: str = "fixed",
stepvalue: List[int] = [],
warmup_lr: float = 0.0001,
warmup_iter: int = 0,
resume: bool = False,
solver_type: Solver = "SGD",
sam : bool = False,
swa : bool = False,
lookahead : bool = False,
lookahead_steps : int = 6,
lookahead_alpha : float = 0.5,
rectified : bool = False,
decoupled_wd_periods : int = 4,
decoupled_wd_mult : float = 2.0,
lr_dropout : float = 1.0,
batch_size: int = 128,
test_batch_size: int = 32,
shuffle: bool = True,
tsplit: float = 0.2,
min_count: int = 10,
min_word_length: int = 5,
count: bool = False,
tfidf: bool = False,
sentences: bool = False,
characters: bool = False,
sequence: int = -1,
read_forward: bool = True,
alphabet: str = alpha,
sparse: bool = False,
template: Optional[str] = None,
activation: str = "relu",
embedding: bool = False,
objective: str = '',
class_weights: List[float] = [],
scale_pos_weight: float = 1.0,
autoencoder: bool = False,
lregression: bool = False,
finetune: bool = False,
weights: str = "",
iter_size: int = 1,
target_repository: str = "",
##-- new txt input conns stuff for bert and gpt2
ordered_words: bool = True,
wordpiece_tokens: bool = True,
punctuation_tokens: bool = True,
lower_case: bool =False,
word_start: str = "Ġ",
suffix_start: str = "",
##--end bert, gpt2 new stuff
embedding_size: int = 768,
freeze_traced: bool = False,
**kwargs
) -> None:
super().__init__(sname, locals())
self.train_labels = SelectMultiple(
options=[], value=[], description="Training labels", disabled=False
)
self.test_labels = SelectMultiple(
options=[], value=[], description="Testing labels", disabled=False
)
# self.testing_repo.observe(self.update_label_list, names="value")
self.training_repo.observe( # type: ignore
self.update_label_list, names="value"
)
self.train_labels.observe(self.update_train_file_list, names="value")
self.test_labels.observe(self.update_test_file_list, names="value")
self.file_list.observe(self.display_text, names="value")
self.update_label_list(())
self._img_explorer.children = [
HBox([HBox([self.train_labels, self.test_labels])]),
self.file_list,
self.output,
]
if self.characters: # type: ignore
self.db.value = True # type: ignore
if self.mllib.value == "torch":
self.db.value = False
def display_text(self, args):
self.output.clear_output()
with self.output:
for path in args["new"]:
with open(path, "r", encoding="utf-8", errors="ignore") as fh:
for i, x in enumerate(fh.readlines()):
if i == 20:
break
print(x.strip())
def update_train_file_list(self, *args):
with self.output:
if len(self.train_labels.value) == 0:
return
directory = (
Path(self.training_repo.value) / self.train_labels.value[0]
)
self.file_list.options = [
fh.as_posix()
for fh in sample_from_iterable(directory.glob("**/*"), 10)
]
self.test_labels.value = []
def update_test_file_list(self, *args):
with self.output:
if len(self.test_labels.value) == 0:
return
directory = (
Path(self.testing_repo.value) / self.test_labels.value[0]
)
self.file_list.options = [
fh.as_posix()
for fh in sample_from_iterable(directory.glob("**/*"), 10)
]
self.train_labels.value = []
def _create_parameters_input(self) -> JSONType:
return {
"connector": "txt",
"characters": self.characters.value,
"sequence": self.sequence.value,
"read_forward": self.read_forward.value,
"alphabet": self.alphabet.value,
"sparse": self.sparse.value,
"embedding": self.embedding.value,
"ordered_words": self.ordered_words.value,
"wordpiece_tokens": self.wordpiece_tokens.value,
"punctuation_tokens": self.punctuation_tokens.value,
"lower_case": self.lower_case.value,
"word_start": self.word_start.value,
"suffix_start": self.suffix_start.value,
}
def _create_parameters_mllib(self) -> JSONType:
dic = super()._create_parameters_mllib()
dic["embedding_size"] = self.embedding_size.value
dic["freeze_traced"] = self.freeze_traced.value
return dic
def _train_parameters_input(self) -> JSONType:
return {
"alphabet": self.alphabet.value,
"characters": self.characters.value,
"count": self.count.value,
"db": self.db.value,
"embedding": self.embedding.value,
"min_count": self.min_count.value,
"min_word_length": self.min_word_length.value,
"read_forward": self.read_forward.value,
"sentences": self.sentences.value,
"sequence": self.sequence.value,
"shuffle": self.shuffle.value,
"test_split": self.tsplit.value,
"tfidf": self.tfidf.value,
}
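# Example (illustrative — the service name and repository path are placeholders):
#     svc = Text("text_demo", training_repo=Path("/data/news20"), nclasses=20)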
|
py | 1a3c0938ab638d1cfd963d1ca96f58089685655b | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
from unittest import mock
import numpy as np
from ax.core.metric import Metric
from ax.core.objective import MultiObjective, Objective, ScalarizedObjective
from ax.core.observation import Observation, ObservationData, ObservationFeatures
from ax.core.optimization_config import OptimizationConfig
from ax.core.outcome_constraint import ComparisonOp, OutcomeConstraint
from ax.core.parameter import ChoiceParameter, ParameterType, RangeParameter
from ax.core.parameter_constraint import OrderConstraint, SumConstraint
from ax.core.search_space import SearchSpace
from ax.modelbridge.modelbridge_utils import get_bounds_and_task
from ax.modelbridge.numpy import NumpyModelBridge
from ax.models.numpy_base import NumpyModel
from ax.utils.common.testutils import TestCase
class NumpyModelBridgeTest(TestCase):
def setUp(self):
x = RangeParameter("x", ParameterType.FLOAT, lower=0, upper=1)
y = RangeParameter(
"y", ParameterType.FLOAT, lower=1, upper=2, is_fidelity=True, target_value=2
)
z = RangeParameter("z", ParameterType.FLOAT, lower=0, upper=5)
self.parameters = [x, y, z]
parameter_constraints = [
OrderConstraint(x, y),
SumConstraint([x, z], False, 3.5),
]
self.search_space = SearchSpace(self.parameters, parameter_constraints)
self.observation_features = [
ObservationFeatures(parameters={"x": 0.2, "y": 1.2, "z": 3}),
ObservationFeatures(parameters={"x": 0.4, "y": 1.4, "z": 3}),
ObservationFeatures(parameters={"x": 0.6, "y": 1.6, "z": 3}),
]
self.observation_data = [
ObservationData(
metric_names=["a", "b"],
means=np.array([1.0, -1.0]),
covariance=np.array([[1.0, 4.0], [4.0, 6.0]]),
),
ObservationData(
metric_names=["a", "b"],
means=np.array([2.0, -2.0]),
covariance=np.array([[2.0, 5.0], [5.0, 7.0]]),
),
ObservationData(
metric_names=["a"], means=np.array([3.0]), covariance=np.array([[3.0]])
),
]
self.observations = [
Observation(
features=self.observation_features[i],
data=self.observation_data[i],
arm_name=str(i),
)
for i in range(3)
]
self.pending_observations = {
"b": [ObservationFeatures(parameters={"x": 0.6, "y": 1.6, "z": 3})]
}
self.model_gen_options = {"option": "yes"}
@mock.patch("ax.modelbridge.numpy.NumpyModelBridge.__init__", return_value=None)
def testFitAndUpdate(self, mock_init):
sq_feat = ObservationFeatures({})
sq_data = self.observation_data[2]
sq_obs = Observation(
features=ObservationFeatures({}),
data=self.observation_data[2],
arm_name="status_quo",
)
ma = NumpyModelBridge()
ma._training_data = self.observations + [sq_obs]
model = mock.create_autospec(NumpyModel, instance=True)
# No out of design points allowed in direct calls to fit.
with self.assertRaises(ValueError):
ma._fit(
model,
self.search_space,
self.observation_features + [sq_feat],
self.observation_data + [sq_data],
)
ma._fit(
model, self.search_space, self.observation_features, self.observation_data
)
self.assertEqual(ma.parameters, ["x", "y", "z"])
self.assertEqual(sorted(ma.outcomes), ["a", "b"])
Xs = {
"a": np.array([[0.2, 1.2, 3.0], [0.4, 1.4, 3.0], [0.6, 1.6, 3.0]]),
"b": np.array([[0.2, 1.2, 3.0], [0.4, 1.4, 3.0]]),
}
Ys = {"a": np.array([[1.0], [2.0], [3.0]]), "b": np.array([[-1.0], [-2.0]])}
Yvars = {"a": np.array([[1.0], [2.0], [3.0]]), "b": np.array([[6.0], [7.0]])}
bounds = [(0.0, 1.0), (1.0, 2.0), (0.0, 5.0)]
model_fit_args = model.fit.mock_calls[0][2]
for i, x in enumerate(model_fit_args["Xs"]):
self.assertTrue(np.array_equal(x, Xs[ma.outcomes[i]]))
for i, y in enumerate(model_fit_args["Ys"]):
self.assertTrue(np.array_equal(y, Ys[ma.outcomes[i]]))
for i, v in enumerate(model_fit_args["Yvars"]):
self.assertTrue(np.array_equal(v, Yvars[ma.outcomes[i]]))
self.assertEqual(model_fit_args["bounds"], bounds)
self.assertEqual(model_fit_args["feature_names"], ["x", "y", "z"])
# And update
ma._update(
observation_features=self.observation_features,
observation_data=self.observation_data,
)
# Calling _update requires passing ALL data.
model_update_args = model.update.mock_calls[0][2]
for i, x in enumerate(model_update_args["Xs"]):
self.assertTrue(np.array_equal(x, Xs[ma.outcomes[i]]))
for i, y in enumerate(model_update_args["Ys"]):
self.assertTrue(np.array_equal(y, Ys[ma.outcomes[i]]))
for i, v in enumerate(model_update_args["Yvars"]):
self.assertTrue(np.array_equal(v, Yvars[ma.outcomes[i]]))
@mock.patch(
"ax.modelbridge.numpy.NumpyModelBridge._model_predict",
return_value=(
np.array([[1.0, -1], [2.0, -2]]),
np.stack(
(np.array([[1.0, 4.0], [4.0, 6]]), np.array([[2.0, 5.0], [5.0, 7]]))
),
),
autospec=True,
)
@mock.patch("ax.modelbridge.numpy.NumpyModelBridge.__init__", return_value=None)
def testPredict(self, mock_init, mock_predict):
ma = NumpyModelBridge()
ma.parameters = ["x", "y", "z"]
ma.outcomes = ["a", "b"]
observation_data = ma._predict(self.observation_features)
X = np.array([[0.2, 1.2, 3.0], [0.4, 1.4, 3.0], [0.6, 1.6, 3]])
self.assertTrue(np.array_equal(mock_predict.mock_calls[0][2]["X"], X))
for i, od in enumerate(observation_data):
self.assertEqual(od, self.observation_data[i])
@mock.patch(
"ax.modelbridge.numpy.NumpyModelBridge._model_gen",
autospec=True,
return_value=(
np.array([[1.0, 2.0, 3.0], [3.0, 4.0, 3.0]]),
np.array([1.0, 2.0]),
{},
[],
),
)
@mock.patch(
"ax.modelbridge.numpy.NumpyModelBridge._model_best_point",
autospec=True,
return_value=None,
)
@mock.patch("ax.modelbridge.numpy.NumpyModelBridge.__init__", return_value=None)
def testGen(self, mock_init, mock_best_point, mock_gen):
# Test with constraints
optimization_config = OptimizationConfig(
objective=Objective(Metric("a"), minimize=True),
outcome_constraints=[
OutcomeConstraint(Metric("b"), ComparisonOp.GEQ, 2, False)
],
)
ma = NumpyModelBridge()
ma.parameters = ["x", "y", "z"]
ma.outcomes = ["a", "b"]
ma.transforms = OrderedDict()
observation_features, weights, best_obsf, _ = ma._gen(
n=3,
search_space=self.search_space,
optimization_config=optimization_config,
pending_observations=self.pending_observations,
fixed_features=ObservationFeatures({"z": 3.0}),
model_gen_options=self.model_gen_options,
)
gen_args = mock_gen.mock_calls[0][2]
self.assertEqual(gen_args["n"], 3)
self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0), (0.0, 5.0)])
self.assertTrue(
np.array_equal(gen_args["objective_weights"], np.array([-1.0, 0.0]))
)
self.assertTrue(
np.array_equal(gen_args["outcome_constraints"][0], np.array([[0.0, -1.0]]))
)
self.assertTrue(
np.array_equal(gen_args["outcome_constraints"][1], np.array([[-2]]))
)
self.assertTrue(
np.array_equal(
gen_args["linear_constraints"][0],
np.array([[1.0, -1, 0.0], [-1.0, 0.0, -1.0]]),
)
)
self.assertTrue(
np.array_equal(gen_args["linear_constraints"][1], np.array([[0.0], [-3.5]]))
)
self.assertEqual(gen_args["fixed_features"], {2: 3.0})
self.assertTrue(
np.array_equal(gen_args["pending_observations"][0], np.array([]))
)
self.assertTrue(
np.array_equal(
gen_args["pending_observations"][1], np.array([[0.6, 1.6, 3.0]])
)
)
self.assertEqual(gen_args["model_gen_options"], {"option": "yes"})
self.assertEqual(
observation_features[0].parameters, {"x": 1.0, "y": 2.0, "z": 3.0}
)
self.assertEqual(
observation_features[1].parameters, {"x": 3.0, "y": 4.0, "z": 3.0}
)
self.assertTrue(np.array_equal(weights, np.array([1.0, 2.0])))
# Test with multiple objectives.
oc2 = OptimizationConfig(
objective=ScalarizedObjective(
metrics=[Metric(name="a"), Metric(name="b")], minimize=True
)
)
observation_features, weights, best_obsf, _ = ma._gen(
n=3,
search_space=self.search_space,
optimization_config=oc2,
pending_observations=self.pending_observations,
fixed_features=ObservationFeatures({"z": 3.0}),
model_gen_options=self.model_gen_options,
)
gen_args = mock_gen.mock_calls[1][2]
self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0), (0.0, 5.0)])
self.assertIsNone(gen_args["outcome_constraints"])
self.assertTrue(
np.array_equal(gen_args["objective_weights"], np.array([-1.0, -1.0]))
)
# Test with MultiObjective (unweighted multiple objectives)
oc3 = OptimizationConfig(
objective=MultiObjective(
metrics=[Metric(name="a"), Metric(name="b", lower_is_better=True)],
minimize=True,
)
)
search_space = SearchSpace(self.parameters) # Unconstrained
observation_features, weights, best_obsf, _ = ma._gen(
n=3,
search_space=search_space,
optimization_config=oc3,
pending_observations=self.pending_observations,
fixed_features=ObservationFeatures({"z": 3.0}),
model_gen_options=self.model_gen_options,
)
gen_args = mock_gen.mock_calls[2][2]
self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0), (0.0, 5.0)])
self.assertIsNone(gen_args["outcome_constraints"])
self.assertTrue(
np.array_equal(gen_args["objective_weights"], np.array([1.0, -1.0]))
)
# Test with no constraints, no fixed feature, no pending observations
search_space = SearchSpace(self.parameters[:2])
optimization_config.outcome_constraints = []
ma.parameters = ["x", "y"]
ma._gen(3, search_space, {}, ObservationFeatures({}), None, optimization_config)
gen_args = mock_gen.mock_calls[3][2]
self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0)])
self.assertIsNone(gen_args["outcome_constraints"])
self.assertIsNone(gen_args["linear_constraints"])
self.assertIsNone(gen_args["fixed_features"])
self.assertIsNone(gen_args["pending_observations"])
# Test validation
optimization_config = OptimizationConfig(
objective=Objective(Metric("a"), minimize=False),
outcome_constraints=[
OutcomeConstraint(Metric("b"), ComparisonOp.GEQ, 2, False)
],
)
with self.assertRaises(ValueError):
ma._gen(
n=3,
search_space=self.search_space,
optimization_config=optimization_config,
pending_observations={},
fixed_features=ObservationFeatures({}),
)
optimization_config.objective.minimize = True
optimization_config.outcome_constraints[0].relative = True
with self.assertRaises(ValueError):
ma._gen(
n=3,
search_space=self.search_space,
optimization_config=optimization_config,
pending_observations={},
fixed_features=ObservationFeatures({}),
)
@mock.patch(
"ax.modelbridge.numpy.NumpyModelBridge._model_cross_validate",
return_value=(
np.array([[1.0, -1], [2.0, -2]]),
np.stack(
(np.array([[1.0, 4.0], [4.0, 6]]), np.array([[2.0, 5.0], [5.0, 7]]))
),
),
autospec=True,
)
@mock.patch("ax.modelbridge.numpy.NumpyModelBridge.__init__", return_value=None)
def testCrossValidate(self, mock_init, mock_cv):
ma = NumpyModelBridge()
ma.parameters = ["x", "y", "z"]
ma.outcomes = ["a", "b"]
observation_data = ma._cross_validate(
self.observation_features, self.observation_data, self.observation_features
)
Xs = [
np.array([[0.2, 1.2, 3.0], [0.4, 1.4, 3.0], [0.6, 1.6, 3]]),
np.array([[0.2, 1.2, 3.0], [0.4, 1.4, 3.0]]),
]
Ys = [np.array([[1.0], [2.0], [3.0]]), np.array([[-1.0], [-2.0]])]
Yvars = [np.array([[1.0], [2.0], [3.0]]), np.array([[6.0], [7.0]])]
Xtest = np.array([[0.2, 1.2, 3.0], [0.4, 1.4, 3.0], [0.6, 1.6, 3]])
# Transform to arrays:
model_cv_args = mock_cv.mock_calls[0][2]
for i, x in enumerate(model_cv_args["Xs_train"]):
self.assertTrue(np.array_equal(x, Xs[i]))
for i, y in enumerate(model_cv_args["Ys_train"]):
self.assertTrue(np.array_equal(y, Ys[i]))
for i, v in enumerate(model_cv_args["Yvars_train"]):
self.assertTrue(np.array_equal(v, Yvars[i]))
self.assertTrue(np.array_equal(model_cv_args["X_test"], Xtest))
# Transform from arrays:
for i, od in enumerate(observation_data):
self.assertEqual(od, self.observation_data[i])
def testGetBoundsAndTask(self):
bounds, task_features, target_fidelities = get_bounds_and_task(
self.search_space, ["x", "y", "z"]
)
self.assertEqual(bounds, [(0.0, 1.0), (1.0, 2.0), (0.0, 5.0)])
self.assertEqual(task_features, [])
self.assertEqual(target_fidelities, {1: 2.0})
bounds, task_features, target_fidelities = get_bounds_and_task(
self.search_space, ["x", "z"]
)
self.assertEqual(target_fidelities, {})
# Test that Int param is treated as task feature
search_space = SearchSpace(self.parameters)
search_space._parameters["x"] = RangeParameter(
"x", ParameterType.INT, lower=1, upper=4
)
bounds, task_features, target_fidelities = get_bounds_and_task(
search_space, ["x", "y", "z"]
)
self.assertEqual(task_features, [0])
# Test validation
search_space._parameters["x"] = ChoiceParameter(
"x", ParameterType.FLOAT, [0.1, 0.4]
)
with self.assertRaises(ValueError):
get_bounds_and_task(search_space, ["x", "y", "z"])
search_space._parameters["x"] = RangeParameter(
"x", ParameterType.FLOAT, lower=1.0, upper=4.0, log_scale=True
)
with self.assertRaises(ValueError):
get_bounds_and_task(search_space, ["x", "y", "z"])
|
py | 1a3c095b72db96b141f8f21fa84456b293f1f1f6 | import matplotlib.pyplot as plt
import numpy as np
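# Assumed log format (inferred from the parsing below; adjust if your training log differs):
#   training lines look like   "... Step 1000/10000; acc 55.21; ppl 12.34; ..."
#   validation lines report    "... perplexity: 10.5" followed on the next line by
#                              "... accuracy: 57.3"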
with open("log1.txt",'r',encoding='utf-8') as f:
train_x = []
train_y = []
dev_x = []
dev_y = []
step = 0
log=f.readline()
while(log):
log = log.split()
if "Step" in log:
index = log.index("Step")
step = int(log[index + 1].split('/')[0])
if step>950:
acc = float(log[index + 3][:-1])
ppl = float(log[index + 5][:-1])
train_x.append(step)
train_y.append([acc,ppl])
if "perplexity:" in log:
dev_x.append(step)
ppl = float(log[-1])
log = f.readline().split()
acc = float(log[-1])
dev_y.append([acc,ppl])
log = f.readline()
y = 'acc'
if y == 'acc':
train_y = np.array(train_y)[:,0]
dev_y = np.array(dev_y)[:,0]
else:
train_y = np.array(train_y)[:,1]
dev_y = np.array(dev_y)[:,1]
y = 'ppl'
plt.plot(train_x, train_y, label = "train")
plt.plot(dev_x, dev_y, label = "test")
plt.xlabel("steps")
plt.ylabel(y)
plt.legend()
plt.show() |
py | 1a3c0b996ebdd795cef4319c041d050c15c03e1f | # Copyright (c) 2021 AllSeeingEyeTolledEweSew
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
"""BTN support for TVAF."""
import contextlib
import logging
import pathlib
import sqlite3
from typing import Any
from typing import AsyncIterator
from typing import Awaitable
from typing import Callable
from typing import cast
from typing import Dict
from typing import Iterator
from typing import Optional
from typing import Tuple
from btn_cache import metadata_db
from btn_cache import site as btn_site
from btn_cache import storage as btn_storage
import dbver
import libtorrent as lt
import requests
from tvaf import concurrency
from tvaf import config as config_lib
from tvaf import lifecycle
from tvaf import services
from tvaf import swarm as tvaf_swarm
from tvaf import torrent_info
from tvaf.swarm import ConfigureSwarm
_LOG = logging.getLogger(__name__)
@lifecycle.singleton()
def get_storage() -> btn_storage.Storage:
return btn_storage.Storage(pathlib.Path("btn"))
def get_auth_from(config: config_lib.Config) -> btn_site.UserAuth:
return btn_site.UserAuth(
user_id=config.get_int("btn_user_id"),
auth=config.get_str("btn_auth"),
authkey=config.get_str("btn_authkey"),
passkey=config.get_str("btn_passkey"),
api_key=config.get_str("btn_api_key"),
)
@lifecycle.asingleton()
@services.startup_plugin("50_btn")
async def get_auth() -> btn_site.UserAuth:
return get_auth_from(await services.get_config())
@lifecycle.asingleton()
async def get_requests_session() -> requests.Session:
session = requests.Session()
session.headers.update({"User-Agent": "tvaf-btn"})
return session
@lifecycle.asingleton()
async def get_access() -> btn_site.UserAccess:
return btn_site.UserAccess(
auth=await get_auth(), session=await get_requests_session()
)
@services.stage_config_plugin("50_btn")
@contextlib.asynccontextmanager
async def stage_config(config: config_lib.Config) -> AsyncIterator[None]:
get_auth_from(config)
yield
get_auth.cache_clear()
get_access.cache_clear()
METADATA_DB_VERSION_SUPPORTED = 1_000_000
def get_metadata_db_conn() -> sqlite3.Connection:
path = get_storage().metadata_db_path
path.parent.mkdir(exist_ok=True, parents=True)
return sqlite3.Connection(path, isolation_level=None)
metadata_db_pool = dbver.null_pool(get_metadata_db_conn)
@contextlib.contextmanager
def read_metadata_db() -> Iterator[Tuple[sqlite3.Connection, int]]:
with dbver.begin_pool(metadata_db_pool, dbver.LockMode.DEFERRED) as conn:
version = metadata_db.get_version(conn)
dbver.semver_check_breaking(version, METADATA_DB_VERSION_SUPPORTED)
yield (conn, version)
@contextlib.contextmanager
def write_metadata_db() -> Iterator[Tuple[sqlite3.Connection, int]]:
# TODO: should we set WAL? where?
with dbver.begin_pool(metadata_db_pool, dbver.LockMode.IMMEDIATE) as conn:
version = metadata_db.upgrade(conn)
dbver.semver_check_breaking(version, METADATA_DB_VERSION_SUPPORTED)
yield (conn, version)
async def get_fetcher(
torrent_entry_id: int,
) -> Optional[Callable[[], Awaitable[bytes]]]:
access = await get_access()
# TODO: should btn_cache do this validation?
if access._auth.passkey is None:
return None
async def fetch() -> bytes:
# TODO: change to aiohttp
resp = await concurrency.to_thread(access.get_torrent, torrent_entry_id)
resp.raise_for_status()
return await concurrency.to_thread(getattr, resp, "content")
return fetch
async def fetch_and_store(info_hashes: lt.info_hash_t) -> None:
torrent_entry_id = await concurrency.to_thread(get_torrent_entry_id, info_hashes)
fetch = await get_fetcher(torrent_entry_id)
if fetch is None:
return
bencoded = await fetch()
bdecoded = cast(Dict[bytes, Any], lt.bdecode(bencoded))
# TODO: top-level publish
await concurrency.to_thread(
receive_bdecoded_info, torrent_entry_id, bdecoded[b"info"]
)
def map_file_sync(info_hashes: lt.info_hash_t, file_index: int) -> Tuple[int, int]:
with read_metadata_db() as (conn, version):
if version == 0:
raise KeyError(info_hashes)
hexdigest = info_hashes.get_best().to_bytes().hex()
cur = conn.cursor().execute(
"select file_info.id from torrent_entry inner join file_info "
"on torrent_entry.id = file_info.id "
"where torrent_entry.info_hash = ?",
(hexdigest,),
)
if cur.fetchone() is None:
_LOG.debug("map_file: no cached file_info")
raise KeyError(info_hashes)
cur = conn.cursor().execute(
"select file_info.start, file_info.stop from torrent_entry "
"inner join file_info on torrent_entry.id = file_info.id "
"where torrent_entry.info_hash = ? and file_index = ?",
(hexdigest, file_index),
)
row = cur.fetchone()
if row is None:
_LOG.debug("map_file: not found")
raise IndexError()
return cast(Tuple[int, int], row)
@torrent_info.map_file_plugin("30_btn")
async def map_file(info_hashes: lt.info_hash_t, file_index: int) -> Tuple[int, int]:
return await concurrency.to_thread(map_file_sync, info_hashes, file_index)
@torrent_info.map_file_plugin("90_btn_fetch")
async def fetch_and_map_file(
info_hashes: lt.info_hash_t, file_index: int
) -> Tuple[int, int]:
await fetch_and_store(info_hashes)
return await map_file(info_hashes, file_index)
def get_torrent_entry_id(info_hashes: lt.info_hash_t) -> int:
digest = info_hashes.get_best().to_bytes()
with read_metadata_db() as (conn, version):
if version == 0:
_LOG.debug("get_torrent_entry_id: empty db")
raise KeyError(info_hashes)
cur = conn.cursor().execute(
"select id from torrent_entry where info_hash = ? and not deleted "
"order by id desc",
(digest.hex(),),
)
row = cur.fetchone()
if row is None:
_LOG.debug("get_torrent_entry_id: not found")
raise KeyError(info_hashes)
(torrent_entry_id,) = cast(Tuple[int], row)
return torrent_entry_id
@tvaf_swarm.access_swarm_plugin("btn")
async def access_swarm(info_hashes: lt.info_hash_t) -> ConfigureSwarm:
torrent_entry_id = await concurrency.to_thread(get_torrent_entry_id, info_hashes)
fetch = await get_fetcher(torrent_entry_id)
if fetch is None:
raise KeyError(info_hashes)
async def configure_swarm(atp: lt.add_torrent_params) -> None:
assert fetch is not None # helps mypy
bencoded = await fetch()
bdecoded = cast(Dict[bytes, Any], lt.bdecode(bencoded))
atp.ti = lt.torrent_info(bdecoded)
# TODO: top-level publish
await concurrency.to_thread(
receive_bdecoded_info, torrent_entry_id, bdecoded[b"info"]
)
return configure_swarm
def receive_bdecoded_info(torrent_entry_id: int, info: Dict[bytes, Any]) -> None:
# We expect the common case to fail to find any ids to update, so we don't
# bother preparing the update outside the lock
with write_metadata_db() as (conn, _):
cur = conn.cursor().execute(
"SELECT id FROM file_info WHERE id = ?", (torrent_entry_id,)
)
row = cur.fetchone()
if row is not None:
return
update = metadata_db.ParsedTorrentInfoUpdate(
info, torrent_entry_id=torrent_entry_id
)
update.apply(conn)
@torrent_info.is_private_plugin("50_btn")
async def is_private(info_hashes: lt.info_hash_t) -> bool:
await concurrency.to_thread(get_torrent_entry_id, info_hashes)
return True
|
py | 1a3c0bc9d81bd890194d864ef3e0ef1434c30e1f | import sys
class Graph:
def __init__(self, v):
self.vertices_count = v
self.vertices = [i for i in range(v)]
self.adj_mat = [[0 for _ in range(v)] for _ in range(v)]
def connect_all(self):
self.adj_mat = []
for i in range(self.vertices_count):
raw_mat = []
for j in range(self.vertices_count):
raw_mat.append(0 if i == j else 1)
self.adj_mat.append(raw_mat)
def add_edge(self, u, v, weight=1, ordered=False):
#print("ADDING EDGE: (u: {}, v: {})".format(u, v))
self.adj_mat[u][v] = weight
if not ordered:
self.adj_mat[v][u] = weight
def print_graph(self):
for i in range(self.vertices_count):
for j in range(self.vertices_count):
print(self.adj_mat[i][j], end=' ')
print()
def breadth_first_search(self, src, dest):
visited = [False]*self.vertices_count
distance = [sys.maxsize]*self.vertices_count
previous_cell = [-1]*self.vertices_count
queue = []
visited[src] = True
distance[src] = 0
queue.append(src)
while not len(queue) == 0:
u = queue[0]
queue.remove(u)
for v in range(len(self.adj_mat[u])):
if not visited[v] and self.adj_mat[u][v] != 0:
visited[v] = True
distance[v] = distance[u] + 1
previous_cell[v] = u
queue.append(v)
return previous_cell
def get_shortest_path(self, src, dest):
return self.breadth_first_search(src, dest)
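# Hedged usage sketch (illustrative, not part of the original class): get_shortest_path
# returns the predecessor array from breadth_first_search, so the actual path still has
# to be walked backwards from the destination, e.g. with a helper like this one.
def reconstruct_path(previous_cell, src, dest):
    path = []
    node = dest
    while node != -1:
        path.append(node)
        if node == src:
            return list(reversed(path))
        node = previous_cell[node]
    return []  # dest is not reachable from src


if __name__ == "__main__":
    g = Graph(4)
    g.add_edge(0, 1)
    g.add_edge(1, 2)
    g.add_edge(2, 3)
    print(reconstruct_path(g.get_shortest_path(0, 3), 0, 3))  # expected: [0, 1, 2, 3]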
|
py | 1a3c0cbf01828e7ec460f859fec77a5bf5a04375 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class ComputeManagementClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes
"""Configuration for ComputeManagementClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Subscription credentials which uniquely identify Microsoft Azure
subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
:keyword api_version: Api Version. Default value is "2021-03-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
super(ComputeManagementClientConfiguration, self).__init__(**kwargs)
api_version = kwargs.pop('api_version', "2021-03-01") # type: str
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
self.credential = credential
self.subscription_id = subscription_id
self.api_version = api_version
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-compute/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
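# Hedged usage sketch (not part of the generated code): this configuration is normally
# constructed for you by ComputeManagementClient, but it can also be built directly with
# an async credential such as DefaultAzureCredential from azure.identity.aio:
#
#     from azure.identity.aio import DefaultAzureCredential
#     config = ComputeManagementClientConfiguration(
#         credential=DefaultAzureCredential(),
#         subscription_id="<subscription-id>",  # placeholder
#     )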
|
py | 1a3c0d6784003245cded7b321a33332557a8ab97 | import cv2
import os
import utils
import numpy as np
dataset_root = '/media/jack/data/Dataset'
# use_image = ['cleanpass', 'finalpass'][0]
os.chdir(dataset_root)
print('dataset root on', os.getcwd())
for f1 in ['TRAIN', 'TEST']:
save_path = os.path.join('pytorch/flyingthings3d_s', f1)
os.makedirs(save_path, exist_ok=True)
os.makedirs(os.path.join(save_path, 'cleanpass'), exist_ok=True)
os.makedirs(os.path.join(save_path, 'finalpass'), exist_ok=True)
os.makedirs(os.path.join(save_path, 'left_disparity'), exist_ok=True)
os.makedirs(os.path.join(save_path, 'right_disparity'), exist_ok=True)
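# Output layout produced by this loop: pytorch/flyingthings3d_s/<TRAIN|TEST>/
# {cleanpass,finalpass,left_disparity,right_disparity}/<index>.np, where each
# cleanpass/finalpass file stores the left and right views stacked on the channel axis.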
index = 0
f2 = 'A'
subfolder = f1 + '/' + f2
cleanpass_root = os.path.join(f'flyingthings3d/frames_cleanpass/', subfolder)
finalpass_root = os.path.join(f'flyingthings3d/frames_finalpass/', subfolder)
disparity_root = os.path.join('flyingthings3d/disparity/', subfolder)
for folder in os.listdir(cleanpass_root):
for file in os.listdir(os.path.join(cleanpass_root, folder, 'left')):
print('process [{}/{}] {}'.format(subfolder, folder, index))
# Clean pass
left_image = cv2.imread(os.path.join(cleanpass_root, folder, 'left', file))
right_image = cv2.imread(os.path.join(cleanpass_root, folder, 'right', file))
X = np.concatenate([left_image, right_image], axis=2)
X = X.swapaxes(0, 2).swapaxes(1, 2)
utils.save(X, os.path.join(save_path, f'cleanpass/{index:05d}.np'))
# Final pass
left_image = cv2.imread(os.path.join(finalpass_root, folder, 'left', file))
right_image = cv2.imread(os.path.join(finalpass_root, folder, 'right', file))
X = np.concatenate([left_image, right_image], axis=2)
X = X.swapaxes(0, 2).swapaxes(1, 2)
utils.save(X, os.path.join(save_path, f'finalpass/{index:05d}.np'))
# Left Disparity
Y = utils.read_pfm(os.path.join(disparity_root, folder, 'left', file[:4] + '.pfm')).squeeze()
utils.save(Y, os.path.join(save_path, f'left_disparity/{index:05d}.np'))
# Right Disparity
Y = utils.read_pfm(os.path.join(disparity_root, folder, 'right', file[:4] + '.pfm')).squeeze()
utils.save(Y, os.path.join(save_path, f'right_disparity/{index:05d}.np'))
index += 1
|
py | 1a3c0d6e628a97c1f38533efbe1aeee277650637 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2015, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import flask
from ..app import bokeh_app
## This URL hierarchy is important, because of the way we build bokehjs
## the source mappings list the source file as being inside ../../src
@bokeh_app.route('/bokehjs/static/<path:filename>')
def bokehjs_file(filename):
""" Return a specific BokehJS deployment file
:param filename: name of the file to retrieve
:status 200: file is found
:status 404: file is not found
"""
return flask.send_from_directory(bokeh_app.bokehjsdir, filename)
@bokeh_app.route('/bokehjs/src/<path:filename>')
def bokehjssrc_file(filename):
""" Return a specific BokehJS source code file
:param filename: name of the file to retrieve
:status 200: file is found
:status 404: file is not found
"""
return flask.send_from_directory(bokeh_app.bokehjssrcdir, filename)
|
py | 1a3c0e384e8316ea2071a543661650753855ceec | from typing import List, Optional, Set, Dict
import aiosqlite
from mint.protocols.wallet_protocol import CoinState
from mint.types.blockchain_format.coin import Coin
from mint.types.blockchain_format.sized_bytes import bytes32
from mint.types.coin_record import CoinRecord
from mint.util.db_wrapper import DBWrapper
from mint.util.ints import uint32, uint64
from mint.util.lru_cache import LRUCache
from time import time
import logging
log = logging.getLogger(__name__)
class CoinStore:
"""
This object handles CoinRecords in DB.
A cache is maintained for quicker access to recent coins.
"""
coin_record_db: aiosqlite.Connection
coin_record_cache: LRUCache
cache_size: uint32
db_wrapper: DBWrapper
@classmethod
async def create(cls, db_wrapper: DBWrapper, cache_size: uint32 = uint32(60000)):
self = cls()
self.cache_size = cache_size
self.db_wrapper = db_wrapper
self.coin_record_db = db_wrapper.db
# the coin_name is unique in this table because the CoinStore always
# only represent a single peak
await self.coin_record_db.execute(
(
"CREATE TABLE IF NOT EXISTS coin_record("
"coin_name text PRIMARY KEY,"
" confirmed_index bigint,"
" spent_index bigint,"
" spent int,"
" coinbase int,"
" puzzle_hash text,"
" coin_parent text,"
" amount blob,"
" timestamp bigint)"
)
)
# Useful for reorg lookups
await self.coin_record_db.execute(
"CREATE INDEX IF NOT EXISTS coin_confirmed_index on coin_record(confirmed_index)"
)
await self.coin_record_db.execute("CREATE INDEX IF NOT EXISTS coin_spent_index on coin_record(spent_index)")
# earlier versions of mint created this index despite no lookups needing
# it. For now, just don't create it for new installs. In the future we
# may remove the index from existing installations as well
# await self.coin_record_db.execute("DROP INDEX IF EXISTS coin_spent")
await self.coin_record_db.execute("CREATE INDEX IF NOT EXISTS coin_puzzle_hash on coin_record(puzzle_hash)")
await self.coin_record_db.execute("CREATE INDEX IF NOT EXISTS coin_parent_index on coin_record(coin_parent)")
await self.coin_record_db.commit()
self.coin_record_cache = LRUCache(cache_size)
return self
async def new_block(
self,
height: uint32,
timestamp: uint64,
included_reward_coins: Set[Coin],
tx_additions: List[Coin],
tx_removals: List[bytes32],
) -> List[CoinRecord]:
"""
Only called for transaction blocks (i.e. blocks that have rewards and transactions)
Returns a list of the CoinRecords that were added by this block
"""
start = time()
additions = []
for coin in tx_additions:
record: CoinRecord = CoinRecord(
coin,
height,
uint32(0),
False,
False,
timestamp,
)
additions.append(record)
if height == 0:
assert len(included_reward_coins) == 0
else:
assert len(included_reward_coins) >= 2
for coin in included_reward_coins:
reward_coin_r: CoinRecord = CoinRecord(
coin,
height,
uint32(0),
False,
True,
timestamp,
)
additions.append(reward_coin_r)
await self._add_coin_records(additions)
await self._set_spent(tx_removals, height)
end = time()
log.log(
logging.WARNING if end - start > 10 else logging.DEBUG,
f"It took {end - start:0.2f}s to apply {len(tx_additions)} additions and "
+ f"{len(tx_removals)} removals to the coin store. Make sure "
+ "blockchain database is on a fast drive",
)
return additions
# Checks DB and DiffStores for CoinRecord with coin_name and returns it
async def get_coin_record(self, coin_name: bytes32) -> Optional[CoinRecord]:
cached = self.coin_record_cache.get(coin_name)
if cached is not None:
return cached
cursor = await self.coin_record_db.execute("SELECT * from coin_record WHERE coin_name=?", (coin_name.hex(),))
row = await cursor.fetchone()
await cursor.close()
if row is not None:
coin = self.row_to_coin(row)
record = CoinRecord(coin, row[1], row[2], row[3], row[4], row[8])
self.coin_record_cache.put(record.coin.name(), record)
return record
return None
async def get_coins_added_at_height(self, height: uint32) -> List[CoinRecord]:
cursor = await self.coin_record_db.execute("SELECT * from coin_record WHERE confirmed_index=?", (height,))
rows = await cursor.fetchall()
await cursor.close()
coins = []
for row in rows:
coin = self.row_to_coin(row)
coins.append(CoinRecord(coin, row[1], row[2], row[3], row[4], row[8]))
return coins
async def get_coins_removed_at_height(self, height: uint32) -> List[CoinRecord]:
# Special case to avoid querying all unspent coins (spent_index=0)
if height == 0:
return []
cursor = await self.coin_record_db.execute("SELECT * from coin_record WHERE spent_index=?", (height,))
rows = await cursor.fetchall()
await cursor.close()
coins = []
for row in rows:
spent: bool = bool(row[3])
if spent:
coin = self.row_to_coin(row)
coin_record = CoinRecord(coin, row[1], row[2], spent, row[4], row[8])
coins.append(coin_record)
return coins
# Checks DB and DiffStores for CoinRecords with puzzle_hash and returns them
async def get_coin_records_by_puzzle_hash(
self,
include_spent_coins: bool,
puzzle_hash: bytes32,
start_height: uint32 = uint32(0),
end_height: uint32 = uint32((2 ** 32) - 1),
) -> List[CoinRecord]:
coins = set()
cursor = await self.coin_record_db.execute(
f"SELECT * from coin_record INDEXED BY coin_puzzle_hash WHERE puzzle_hash=? "
f"AND confirmed_index>=? AND confirmed_index<? "
f"{'' if include_spent_coins else 'AND spent=0'}",
(puzzle_hash.hex(), start_height, end_height),
)
rows = await cursor.fetchall()
await cursor.close()
for row in rows:
coin = self.row_to_coin(row)
coins.add(CoinRecord(coin, row[1], row[2], row[3], row[4], row[8]))
return list(coins)
async def get_coin_records_by_puzzle_hashes(
self,
include_spent_coins: bool,
puzzle_hashes: List[bytes32],
start_height: uint32 = uint32(0),
end_height: uint32 = uint32((2 ** 32) - 1),
) -> List[CoinRecord]:
if len(puzzle_hashes) == 0:
return []
coins = set()
puzzle_hashes_db = tuple([ph.hex() for ph in puzzle_hashes])
cursor = await self.coin_record_db.execute(
f"SELECT * from coin_record INDEXED BY coin_puzzle_hash "
f'WHERE puzzle_hash in ({"?," * (len(puzzle_hashes) - 1)}?) '
f"AND confirmed_index>=? AND confirmed_index<? "
f"{'' if include_spent_coins else 'AND spent=0'}",
puzzle_hashes_db + (start_height, end_height),
)
rows = await cursor.fetchall()
await cursor.close()
for row in rows:
coin = self.row_to_coin(row)
coins.add(CoinRecord(coin, row[1], row[2], row[3], row[4], row[8]))
return list(coins)
async def get_coin_records_by_names(
self,
include_spent_coins: bool,
names: List[bytes32],
start_height: uint32 = uint32(0),
end_height: uint32 = uint32((2 ** 32) - 1),
) -> List[CoinRecord]:
if len(names) == 0:
return []
coins = set()
names_db = tuple([name.hex() for name in names])
cursor = await self.coin_record_db.execute(
f'SELECT * from coin_record WHERE coin_name in ({"?," * (len(names) - 1)}?) '
f"AND confirmed_index>=? AND confirmed_index<? "
f"{'' if include_spent_coins else 'AND spent=0'}",
names_db + (start_height, end_height),
)
rows = await cursor.fetchall()
await cursor.close()
for row in rows:
coin = self.row_to_coin(row)
coins.add(CoinRecord(coin, row[1], row[2], row[3], row[4], row[8]))
return list(coins)
def row_to_coin(self, row) -> Coin:
return Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7]))
def row_to_coin_state(self, row):
coin = self.row_to_coin(row)
spent_h = None
if row[3]:
spent_h = row[2]
return CoinState(coin, spent_h, row[1])
async def get_coin_states_by_puzzle_hashes(
self,
include_spent_coins: bool,
puzzle_hashes: List[bytes32],
start_height: uint32 = uint32(0),
end_height: uint32 = uint32((2 ** 32) - 1),
) -> List[CoinState]:
if len(puzzle_hashes) == 0:
return []
coins = set()
puzzle_hashes_db = tuple([ph.hex() for ph in puzzle_hashes])
cursor = await self.coin_record_db.execute(
f'SELECT * from coin_record WHERE puzzle_hash in ({"?," * (len(puzzle_hashes) - 1)}?) '
f"AND confirmed_index>=? AND confirmed_index<? "
f"{'' if include_spent_coins else 'AND spent=0'}",
puzzle_hashes_db + (start_height, end_height),
)
rows = await cursor.fetchall()
await cursor.close()
for row in rows:
coins.add(self.row_to_coin_state(row))
return list(coins)
async def get_coin_records_by_parent_ids(
self,
include_spent_coins: bool,
parent_ids: List[bytes32],
start_height: uint32 = uint32(0),
end_height: uint32 = uint32((2 ** 32) - 1),
) -> List[CoinRecord]:
if len(parent_ids) == 0:
return []
coins = set()
parent_ids_db = tuple([pid.hex() for pid in parent_ids])
cursor = await self.coin_record_db.execute(
f'SELECT * from coin_record WHERE coin_parent in ({"?," * (len(parent_ids) - 1)}?) '
f"AND confirmed_index>=? AND confirmed_index<? "
f"{'' if include_spent_coins else 'AND spent=0'}",
parent_ids_db + (start_height, end_height),
)
rows = await cursor.fetchall()
await cursor.close()
for row in rows:
coin = self.row_to_coin(row)
coins.add(CoinRecord(coin, row[1], row[2], row[3], row[4], row[8]))
return list(coins)
async def get_coin_state_by_ids(
self,
include_spent_coins: bool,
coin_ids: List[bytes32],
start_height: uint32 = uint32(0),
end_height: uint32 = uint32((2 ** 32) - 1),
) -> List[CoinState]:
if len(coin_ids) == 0:
return []
coins = set()
coin_ids_db = tuple([pid.hex() for pid in coin_ids])
cursor = await self.coin_record_db.execute(
f'SELECT * from coin_record WHERE coin_name in ({"?," * (len(coin_ids) - 1)}?) '
f"AND confirmed_index>=? AND confirmed_index<? "
f"{'' if include_spent_coins else 'AND spent=0'}",
coin_ids_db + (start_height, end_height),
)
rows = await cursor.fetchall()
await cursor.close()
for row in rows:
coins.add(self.row_to_coin_state(row))
return list(coins)
async def rollback_to_block(self, block_index: int) -> List[CoinRecord]:
"""
Note that block_index can be negative, in which case everything is rolled back
Returns the list of coin records that have been modified
"""
# Update memory cache
delete_queue: List[bytes32] = []
for coin_name, coin_record in list(self.coin_record_cache.cache.items()):
if int(coin_record.spent_block_index) > block_index:
new_record = CoinRecord(
coin_record.coin,
coin_record.confirmed_block_index,
uint32(0),
False,
coin_record.coinbase,
coin_record.timestamp,
)
self.coin_record_cache.put(coin_record.coin.name(), new_record)
if int(coin_record.confirmed_block_index) > block_index:
delete_queue.append(coin_name)
for coin_name in delete_queue:
self.coin_record_cache.remove(coin_name)
coin_changes: Dict[bytes32, CoinRecord] = {}
cursor_deleted = await self.coin_record_db.execute(
"SELECT * FROM coin_record WHERE confirmed_index>?", (block_index,)
)
rows = await cursor_deleted.fetchall()
for row in rows:
coin = self.row_to_coin(row)
record = CoinRecord(coin, uint32(0), row[2], row[3], row[4], uint64(0))
coin_changes[record.name] = record
await cursor_deleted.close()
# Delete from storage
c1 = await self.coin_record_db.execute("DELETE FROM coin_record WHERE confirmed_index>?", (block_index,))
await c1.close()
cursor_unspent = await self.coin_record_db.execute(
"SELECT * FROM coin_record WHERE confirmed_index>?", (block_index,)
)
rows = await cursor_unspent.fetchall()
for row in rows:
coin = self.row_to_coin(row)
record = CoinRecord(coin, row[1], uint32(0), False, row[4], row[8])
if record.name not in coin_changes:
coin_changes[record.name] = record
await cursor_unspent.close()
c2 = await self.coin_record_db.execute(
"UPDATE coin_record SET spent_index = 0, spent = 0 WHERE spent_index>?",
(block_index,),
)
await c2.close()
return list(coin_changes.values())
# Store CoinRecord in DB and ram cache
async def _add_coin_records(self, records: List[CoinRecord]) -> None:
values = []
for record in records:
self.coin_record_cache.put(record.coin.name(), record)
values.append(
(
record.coin.name().hex(),
record.confirmed_block_index,
record.spent_block_index,
int(record.spent),
int(record.coinbase),
record.coin.puzzle_hash.hex(),
record.coin.parent_coin_info.hex(),
bytes(record.coin.amount),
record.timestamp,
)
)
cursor = await self.coin_record_db.executemany(
"INSERT INTO coin_record VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)",
values,
)
await cursor.close()
# Update coin_record to be spent in DB
async def _set_spent(self, coin_names: List[bytes32], index: uint32):
# if this coin is in the cache, mark it as spent in there
updates = []
for coin_name in coin_names:
r = self.coin_record_cache.get(coin_name)
if r is not None:
self.coin_record_cache.put(
r.name, CoinRecord(r.coin, r.confirmed_block_index, index, True, r.coinbase, r.timestamp)
)
updates.append((index, coin_name.hex()))
await self.coin_record_db.executemany(
"UPDATE OR FAIL coin_record SET spent=1,spent_index=? WHERE coin_name=?", updates
)
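# Hedged usage sketch (illustrative; assumes DBWrapper wraps an aiosqlite connection
# directly -- adapt the construction if the real DBWrapper signature differs):
async def _coin_store_example() -> Optional[CoinRecord]:
    connection = await aiosqlite.connect(":memory:")
    store = await CoinStore.create(DBWrapper(connection))
    # Unknown coin names return None instead of raising.
    record = await store.get_coin_record(bytes32(b"\x00" * 32))
    await connection.close()
    return record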
|
py | 1a3c0e73d62ab9e8593abd0d24ea126a0211acb3 | import tkinter as app
from tkinter import messagebox
from tkinter import Scrollbar
import random
#definitions
family = "sans serif"
primary_c = "#2065d4"
second_c = "#e8e8e8"
inherit = "white"
algorithm_count = 5
keywords = []
passwords = []
window = app.Tk()
window.title("Password Generator")
window.config(bg=inherit)
#elements
scrollbar = Scrollbar(window)
scrollbar.pack( side = app.RIGHT, fill = app.Y )
main_header = app.Label(window, text="PASSWORD GENERATOR",fg=primary_c, bg=inherit, font=(family, 24), pady=10, padx=20)
sub_info = app.Label(window, text="This program generates passwords using the keywords you enter. Currently, the program is using {} algorithm(s) to generate passwords.".format(algorithm_count), fg="grey", bg=inherit, font=(family, 12), wraplength=440, justify=app.LEFT, pady=10, padx=10)
input_box = app.Entry(window,font=(family, 16), width=30, borderwidth=10, fg=primary_c, bg=second_c, relief=app.FLAT)
submit = app.Button(window, text="Generate", font=(family, 12), width=35, borderwidth=5,relief=app.RAISED, bg=primary_c,activebackground=primary_c, fg=inherit, activeforeground=inherit)
pass_header = app.Label(window, text="Passwords", fg=primary_c, bg=inherit, pady=10, font=(family, 16))
pass_list = app.Listbox(window, yscrollcommand = scrollbar.set, bg=primary_c, fg=inherit, font=(family, 12), width=42, bd=5)
def main():
submit.config(command = start_app)
#packing
main_header.pack()
sub_info.pack()
input_box.pack()
add_space(window, 1)
submit.pack()
pass_header.pack()
pass_list.pack()
add_space(window, 1)
scrollbar.config(command = pass_list.yview)
messagebox.showinfo("Credit", "This was programmed by Rejwan Islam Rizvy #RIR360")
window.mainloop()
#functions
def add_space(master, amount):
white_space = app.Label(master, text=" ", height=amount, bg=inherit)
white_space.pack()
def start_app():
input = input_box.get()
if not input:
messagebox.showwarning("No Input", "Enter some keywords first")
return 1
keywords = input.split()
pass_list.delete(0, app.END)
#generating passwords
algo_1(keywords, pass_list)
algo_2(keywords, pass_list)
algo_3(keywords, pass_list)
algo_4(keywords, pass_list)
algo_5(keywords, pass_list)
#algorithms
def algo_1(words, list):
password = str(random.randint(0,9))*random.randint(0, 5) + str("".join(words)) + str(random.randint(0,9))*random.randint(0, 5)
list.insert(app.END, password)
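# Illustrative only: for the input "apple pie", algo_1 joins the keywords and pads each
# side with a repeated random digit, producing something like "777applepie22" (either
# side may be empty because the repeat count can be 0).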
def algo_2(words, list):
password = ""
for word in words:
for char in word:
if random.randint(0, 1):
password += char.upper()
else:
password += char.lower()
list.insert(app.END, password)
def algo_3(words, list):
password = ""
for word in words:
if random.randint(0, 1):
password += word.upper()
else:
password += word.lower()
password += str(random.randint(0,9))*random.randint(1, 5)
list.insert(app.END, password)
def algo_4(words, list):
password = ""
password += str(chr(random.randint(33,47)))*random.randint(1, 5)
for word in words:
if random.randint(0, 1):
password += word.capitalize()
else:
password += word.lower()
password += str(chr(random.randint(33,47)))*random.randint(1, 5)
list.insert(app.END, password)
def algo_5(words, list):
password = ""
password += str(chr(random.randint(33,47)))*random.randint(1, 5)
for word in words:
if random.randint(0, 1):
password += word.capitalize()
else:
password += word.lower()
password += str(random.randint(0,9))*random.randint(1, 5)
list.insert(app.END, password)
main()
|
py | 1a3c0e7642051420ebd11dbe9f27cd480bef3498 | #
# Copyright (c) 2015, Adam Meily <[email protected]>
# Pypsi - https://github.com/ameily/pypsi
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import threading
import sys
from pypsi.ansi import AnsiCode, AnsiCodes
from pypsi.os import make_ansi_stream
class ThreadLocalStream(object):
'''
A stream wrapper that is thread-local. This class enables thread-based
pipes by wrapping :attr:`sys.stdout`, :attr:`sys.stderr`, and
:attr:`sys.stdin` and making access to them thread-local. This allows each
thread to, potentially, write to a different stream.
A single stream, such as stdout, is wrapped in Pypsi as follows:
stdout -> thread local stream -> os-specific ansi stream
'''
DefaultAnsiStreamKwargs = dict()
def __init__(self, target, **kwargs):
'''
:param file target: the original target stream (typically either
:attr:`sys.stdout`, :attr:`sys.stderr`, and :attr:`sys.stdin`).
:param int width: the width of the stream in characters; this attribute
determines if word wrapping is enabled and how wide the lines are.
:param bool isatty: whether the underlying stream is a tty stream,
which supports ANSI escape codes.
'''
if ThreadLocalStream.DefaultAnsiStreamKwargs:
kw = dict(ThreadLocalStream.DefaultAnsiStreamKwargs)
kw.update(kwargs)
kwargs = kw
self._target = make_ansi_stream(target, **kwargs)
self._proxies = {}
def _get_target(self):
'''
Get the target stream for the current thread.
:returns: the thread-local proxy stream if one is set, otherwise the default target.
'''
return self._proxies.get(threading.current_thread().ident,
self._target)
def __getattr__(self, name):
return getattr(self._get_target(), name)
def __hasattr__(self, name):
attrs = ('_proxy', '_unproxy', '_get_target', '_proxies', '_target')
return name in attrs or hasattr(self._get_target(), name)
def _proxy(self, target, **kwargs):
'''
Set a thread-local stream.
:param file target: the target stream.
:param int width: the stream width, in characters.
:param bool isatty: whether the target stream is a tty stream.
'''
self._proxies[threading.current_thread().ident] = make_ansi_stream(
target, **kwargs
)
def _unproxy(self, ident=None):
'''
Delete the proxy for a thread.
:param int ident: the thread's :attr:`~threading.Thread.ident`
attribute, or :const:`None` if the current thread's proxy is being
deleted.
'''
ident = ident or threading.current_thread().ident
if ident in self._proxies:
del self._proxies[ident]
def ansi_format(self, tmpl, **kwargs):
'''
Format a string that contains ansi code terms. For example, the following
renders "Hello, Adam" in red:
``sys.stdout.ansi_format("{red}Hello, {name}{reset}", name="Adam")``
The :data:`pypsi.ansi.AnsiCodesSingleton.codes` dict contains all
valid ansi escape code terms. If the current stream does not support
ansi escape codes, they are dropped from the template prior to
printing.
:param str tmpl: the string template
'''
atty = self._get_target().isatty()
for (name, value) in kwargs.items():
if isinstance(value, AnsiCode):
kwargs[name] = str(value) if atty else ''
for (name, code) in AnsiCodes.codes.items():
kwargs[name] = code.code if atty else ''
return tmpl.format(**kwargs)
def ansi_format_prompt(self, tmpl, **kwargs):
'''
Format a string that contains ansi code terms. This function performs
the same formatting as :meth:`ansi_format`, except it is intended for
formatting strings in the prompt by calling
:meth:`pypsi.ansi.AnsiCode.prompt` for each code.
'''
atty = self._get_target().isatty()
for (name, value) in kwargs.items():
if isinstance(value, AnsiCode):
kwargs[name] = value.prompt() if atty else ''
for (name, code) in AnsiCodes.codes.items():
kwargs[name] = code.prompt() if atty else ''
return tmpl.format(**kwargs)
def render(self, parts, prompt=False):
'''
Render a list of objects as single string. This method is the
string version of the :meth:`print` method. Also, this method will
honor the current thread's :meth:`isatty` when rendering ANSI escape
codes.
:param list parts: list of object to render.
:param bool prompt: whether to render
:class:`~pypsi.ansi.AnsiCode` objects as prompts or not.
:returns str: the rendered string.
'''
r = []
target = self._get_target()
for part in parts:
if isinstance(part, AnsiCode):
if target.isatty():
if prompt:
r.append(part.prompt())
else:
r.append(str(part))
elif part.s:
r.append(part.s)
else:
r.append(str(part))
return ''.join(r)
class InvocationThread(threading.Thread):
'''
An invocation of a command from the command line interface.
'''
def __init__(self, shell, invoke, stdin=None, stdout=None, stderr=None):
'''
:param pypsi.shell.Shell shell: the active shell.
:param pypsi.cmdline.CommandInvocation invoke: the invocation to
execute.
:param stream stdin: override the invocation's stdin stream.
:param stream stdout: override the invocation's stdout stream.
:param stream stderr: override the invocation's stderr stream.
'''
super(InvocationThread, self).__init__()
#: The active Shell
self.shell = shell
#: The :class:`~pypsi.cmdline.CommandInvocation` to execute.
self.invoke = invoke
#: Exception info, as returned by :meth:`sys.exc_info` if an exception
#: occurred.
self.exc_info = None
#: The invocation return code.
self.rc = None
if stdin:
self.invoke.stdin = stdin
if stdout:
self.invoke.stdout = stdout
if stderr:
self.invoke.stderr = stderr
def run(self):
'''
Run the command invocation.
'''
try:
self.rc = self.invoke(self.shell)
except:
self.exc_info = sys.exc_info()
self.rc = None
finally:
pass
def stop(self):
'''
Attempt to stop the thread by explicitly closing the stdin, stdout, and
stderr streams.
'''
if self.is_alive():
try:
self.invoke.close_streams()
except:
pass
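# Hedged usage sketch (illustrative, not part of pypsi): proxy a wrapped stdout for a
# single worker thread so its writes go to a different target while other threads keep
# writing to the original stream.
def _example_thread_local_proxy():
    wrapped = ThreadLocalStream(sys.stdout)

    def worker():
        wrapped._proxy(sys.stderr)
        wrapped.write("worker output goes to the proxied stream\n")
        wrapped._unproxy()

    t = threading.Thread(target=worker)
    t.start()
    t.join()
    wrapped.write("main-thread output still goes to the original target\n")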
|