ext (string, 9 classes) | sha (string, 40 chars) | content (string, 3–1.04M chars) |
---|---|---|
py | b40e31e3b37779eab623097007c0799d66e9b735 | #!/usr/bin/env python3
# Note: functions that take **kwargs can be called like
# function(name1=value1, name2=value2), so it should be
# possible to get named buttons from key/value pairs.
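# A minimal sketch of that idea, assuming gpiozero's ButtonBoard keyword
# support; the names red/green/blue/yellow are illustrative and not used
# anywhere else in this script:
#
#   named = ButtonBoard(red=17, green=25, blue=24, yellow=16)
#   if named.red.is_pressed:
#       print("red pressed")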
print("Loading GpioZero..")
from time import sleep
from gpiozero import ButtonBoard, LED
btns = ButtonBoard(17, 25, 24, 16)
led = LED(4)
print("Press any button, Led will flash times of buttons number.")
print("And Ctrl-C to exit...")
def flashTimes(times):
x = 0
while x != times:
sleep(0.1)
print(x + 1)
led.on()
sleep(0.2)
led.off()
x = x + 1
print("Press another...")
while True:
try:
if btns[0].is_pressed:
print("button 1 pressed")
flashTimes(1)
elif btns[1].is_pressed:
print("button 2 pressed")
flashTimes(2)
elif btns[2].is_pressed:
print("button 3 pressed")
flashTimes(3)
elif btns[3].is_pressed:
print("button 4 pressed")
flashTimes(4)
except KeyboardInterrupt:
print("\rExiting")
exit(0)
|
py | b40e3205659854393a7190158567f0a8e29f7e5b | from django.contrib import admin
from adminsortable.admin import SortableAdmin
from seating_charts.models import Table, MealTime, SeatFiller, PinnedStudent, Layout, SeatingStudent, Ethnicity
class SeatFillerInline(admin.TabularInline):
model = SeatFiller
extra = 0
class PinnedStudentInline(admin.TabularInline):
model = PinnedStudent
extra = 0
class TableAdmin(admin.ModelAdmin):
filter_horizontal = ['for_meals']
inlines = [SeatFillerInline, PinnedStudentInline]
class MealTimeAdmin(SortableAdmin):
filter_horizontal = ['include_grades']
class SeatingStudentAdmin(admin.ModelAdmin):
fields = ['enrollment', 'ethnicity', 'food_allergy']
readonly_fields = ['enrollment']
list_filter = ['enrollment__grade', 'enrollment__grade__school', 'ethnicity']
list_display = ['__str__', 'ethnicity', 'food_allergy']
list_editable = ['ethnicity', 'food_allergy']
def has_add_permission(self, request, *args, **kwargs):
return False
def has_delete_permission(self, request, obj=None, *args, **kwargs):
return False
admin.site.register(Table, TableAdmin)
admin.site.register(MealTime, MealTimeAdmin)
admin.site.register(Layout)
admin.site.register(SeatingStudent, SeatingStudentAdmin)
admin.site.register(Ethnicity) |
py | b40e3218da1de5995a2d8bec5cf74e5f322e5f9d | #*****************************************************
# *
# Copyright 2018 Amazon.com, Inc. or its affiliates. *
# All Rights Reserved. *
# *
#*****************************************************
""" A sample lambda for face detection"""
from threading import Thread, Event
import os
import json
import numpy as np
import awscam
import cv2
import greengrasssdk
class LocalDisplay(Thread):
""" Class for facilitating the local display of inference results
(as images). The class is designed to run on its own thread. In
particular the class dumps the inference results into a FIFO
located in the tmp directory (which lambda has access to). The
results can be rendered using mplayer by typing:
mplayer -demuxer lavf -lavfdopts format=mjpeg:probesize=32 /tmp/results.mjpeg
"""
def __init__(self, resolution):
""" resolution - Desired resolution of the project stream """
# Initialize the base class, so that the object can run on its own
# thread.
super(LocalDisplay, self).__init__()
# List of valid resolutions
RESOLUTION = {'1080p' : (1920, 1080), '720p' : (1280, 720), '480p' : (858, 480)}
if resolution not in RESOLUTION:
raise Exception("Invalid resolution")
self.resolution = RESOLUTION[resolution]
# Initialize the default image to be a white canvas. Clients
# will update the image when ready.
self.frame = cv2.imencode('.jpg', 255*np.ones([640, 480, 3], dtype=np.uint8))[1]
self.stop_request = Event()
def run(self):
""" Overridden method that continually dumps images to the desired
FIFO file.
"""
# Path to the FIFO file. The lambda only has permissions to the tmp
# directory. Pointing to a FIFO file in another directory
# will cause the lambda to crash.
result_path = '/tmp/results.mjpeg'
# Create the FIFO file if it doesn't exist.
if not os.path.exists(result_path):
os.mkfifo(result_path)
# This call will block until a consumer is available
with open(result_path, 'w') as fifo_file:
while not self.stop_request.isSet():
try:
# Write the data to the FIFO file. This call will block
# meaning the code will come to a halt here until a consumer
# is available.
fifo_file.write(self.frame.tobytes())
except IOError:
continue
def set_frame_data(self, frame):
""" Method updates the image data. This currently encodes the
numpy array to jpg but can be modified to support other encodings.
frame - Numpy array containing the image data of the next frame
in the project stream.
"""
ret, jpeg = cv2.imencode('.jpg', cv2.resize(frame, self.resolution))
if not ret:
raise Exception('Failed to set frame data')
self.frame = jpeg
def join(self):
self.stop_request.set()
def infinite_infer_run():
""" Entry point of the lambda function"""
try:
# This face detection model is implemented as a single shot detector (ssd).
model_type = 'ssd'
output_map = {1: 'face'}
# Create an IoT client for sending messages to the cloud.
client = greengrasssdk.client('iot-data')
iot_topic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME'])
# Create a local display instance that will dump the image bytes to a FIFO
# file so that the image can be rendered locally.
local_display = LocalDisplay('480p')
local_display.start()
# The sample projects come with optimized artifacts, hence only the artifact
# path is required.
model_path = '/opt/awscam/artifacts/mxnet_deploy_ssd_FP16_FUSED.xml'
# Load the model onto the GPU.
client.publish(topic=iot_topic, payload='Loading face detection model')
model = awscam.Model(model_path, {'GPU': 1})
client.publish(topic=iot_topic, payload='Face detection model loaded')
# Set the threshold for detection
detection_threshold = 0.25
# The height and width of the training set images
input_height = 300
input_width = 300
# Do inference until the lambda is killed.
while True:
# Get a frame from the video stream
ret, frame = awscam.getLastFrame()
if not ret:
raise Exception('Failed to get frame from the stream')
# Resize frame to the same size as the training set.
frame_resize = cv2.resize(frame, (input_height, input_width))
# Run the images through the inference engine and parse the results using
# the parser API. Note that it is possible to get the output of doInference
# and do the parsing manually, but since it is an ssd model,
# a simple API is provided.
parsed_inference_results = model.parseResult(model_type,
model.doInference(frame_resize))
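# The loop below assumes the parsed results look roughly like
# {'ssd': [{'label': 1, 'prob': 0.9, 'xmin': ..., 'ymin': ...,
#           'xmax': ..., 'ymax': ...}, ...]} (values illustrative).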
# Compute the scale in order to draw bounding boxes on the full resolution
# image.
yscale = float(frame.shape[0]) / float(input_height)
xscale = float(frame.shape[1]) / float(input_width)
# Dictionary to be filled with labels and probabilities for MQTT
cloud_output = {}
# Get the detected faces and probabilities
for obj in parsed_inference_results[model_type]:
if obj['prob'] > detection_threshold:
# Add bounding boxes to full resolution frame
xmin = int(xscale * obj['xmin'])
ymin = int(yscale * obj['ymin'])
xmax = int(xscale * obj['xmax'])
ymax = int(yscale * obj['ymax'])
# See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
# for more information about the cv2.rectangle method.
# Method signature: image, point1, point2, color, and thickness.
# cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (255, 165, 20), 10)
# Amount to offset the label/probability text above the bounding box.
# text_offset = 15
# See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
# for more information about the cv2.putText method.
# Method signature: image, text, origin, font face, font scale, color,
# and thickness
# cv2.putText(frame, '{:.2f}%'.format(obj['prob'] * 100),
# (xmin, ymin-text_offset),
# cv2.FONT_HERSHEY_SIMPLEX, 2.5, (255, 165, 20), 6)
# Store label and probability to send to cloud
cloud_output[output_map[obj['label']]] = obj['prob']
frame[ymin:ymax,xmin:xmax,] = cv2.GaussianBlur(frame[ymin:ymax,xmin:xmax,],(19,19),0)
# Set the next frame in the local display stream.
local_display.set_frame_data(frame)
# Send results to the cloud
client.publish(topic=iot_topic, payload=json.dumps(cloud_output))
except Exception as ex:
client.publish(topic=iot_topic, payload='Error in face detection lambda: {}'.format(ex))
infinite_infer_run()
|
py | b40e33a44d23461ac581a58eacba257b3028ba3b | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import datetime
import gettext
import iso8601
from unittest import mock
from oslo_versionedobjects import base as object_base
from oslo_versionedobjects import exception as object_exception
from oslo_versionedobjects import fixture as object_fixture
from watcher.common import context
from watcher.objects import base
from watcher.objects import fields
from watcher.tests import base as test_base
gettext.install('watcher')
@base.WatcherObjectRegistry.register
class MyObj(base.WatcherPersistentObject, base.WatcherObject,
base.WatcherObjectDictCompat):
VERSION = '1.5'
fields = {'foo': fields.IntegerField(),
'bar': fields.StringField(),
'missing': fields.StringField()}
def obj_load_attr(self, attrname):
setattr(self, attrname, 'loaded!')
@object_base.remotable_classmethod
def query(cls, context):
obj = cls(context)
obj.foo = 1
obj.bar = 'bar'
obj.obj_reset_changes()
return obj
@object_base.remotable
def marco(self, context=None):
return 'polo'
@object_base.remotable
def update_test(self, context=None):
if context and context.user == 'alternate':
self.bar = 'alternate-context'
else:
self.bar = 'updated'
@object_base.remotable
def save(self, context=None):
self.obj_reset_changes()
@object_base.remotable
def refresh(self, context=None):
self.foo = 321
self.bar = 'refreshed'
self.obj_reset_changes()
@object_base.remotable
def modify_save_modify(self, context=None):
self.bar = 'meow'
self.save()
self.foo = 42
class MyObj2(object):
@classmethod
def obj_name(cls):
return 'MyObj'
@object_base.remotable_classmethod
def get(cls, *args, **kwargs):
pass
@base.WatcherObjectRegistry.register_if(False)
class WatcherTestSubclassedObject(MyObj):
fields = {'new_field': fields.StringField()}
class _LocalTest(test_base.TestCase):
def setUp(self):
super(_LocalTest, self).setUp()
# Just in case
base.WatcherObject.indirection_api = None
@contextlib.contextmanager
def things_temporarily_local():
# Temporarily go non-remote so the conductor handles
# this request directly
_api = base.WatcherObject.indirection_api
base.WatcherObject.indirection_api = None
yield
base.WatcherObject.indirection_api = _api
class _TestObject(object):
def test_hydration_type_error(self):
primitive = {'watcher_object.name': 'MyObj',
'watcher_object.namespace': 'watcher',
'watcher_object.version': '1.5',
'watcher_object.data': {'foo': 'a'}}
self.assertRaises(ValueError, MyObj.obj_from_primitive, primitive)
def test_hydration(self):
primitive = {'watcher_object.name': 'MyObj',
'watcher_object.namespace': 'watcher',
'watcher_object.version': '1.5',
'watcher_object.data': {'foo': 1}}
obj = MyObj.obj_from_primitive(primitive)
self.assertEqual(1, obj.foo)
def test_hydration_bad_ns(self):
primitive = {'watcher_object.name': 'MyObj',
'watcher_object.namespace': 'foo',
'watcher_object.version': '1.5',
'watcher_object.data': {'foo': 1}}
self.assertRaises(object_exception.UnsupportedObjectError,
MyObj.obj_from_primitive, primitive)
def test_dehydration(self):
expected = {'watcher_object.name': 'MyObj',
'watcher_object.namespace': 'watcher',
'watcher_object.version': '1.5',
'watcher_object.data': {'foo': 1}}
obj = MyObj(self.context)
obj.foo = 1
obj.obj_reset_changes()
self.assertEqual(expected, obj.obj_to_primitive())
def test_get_updates(self):
obj = MyObj(self.context)
self.assertEqual({}, obj.obj_get_changes())
obj.foo = 123
self.assertEqual({'foo': 123}, obj.obj_get_changes())
obj.bar = 'test'
self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes())
obj.obj_reset_changes()
self.assertEqual({}, obj.obj_get_changes())
def test_object_property(self):
obj = MyObj(self.context, foo=1)
self.assertEqual(1, obj.foo)
def test_object_property_type_error(self):
obj = MyObj(self.context)
def fail():
obj.foo = 'a'
self.assertRaises(ValueError, fail)
def test_load(self):
obj = MyObj(self.context)
self.assertEqual('loaded!', obj.bar)
def test_load_in_base(self):
@base.WatcherObjectRegistry.register_if(False)
class Foo(base.WatcherPersistentObject, base.WatcherObject,
base.WatcherObjectDictCompat):
fields = {'foobar': fields.IntegerField()}
obj = Foo(self.context)
self.assertRaisesRegex(
NotImplementedError, "Cannot load 'foobar' in the base class",
getattr, obj, 'foobar')
def test_loaded_in_primitive(self):
obj = MyObj(self.context)
obj.foo = 1
obj.obj_reset_changes()
self.assertEqual('loaded!', obj.bar)
expected = {'watcher_object.name': 'MyObj',
'watcher_object.namespace': 'watcher',
'watcher_object.version': '1.5',
'watcher_object.changes': ['bar'],
'watcher_object.data': {'foo': 1,
'bar': 'loaded!'}}
self.assertEqual(expected, obj.obj_to_primitive())
def test_changes_in_primitive(self):
obj = MyObj(self.context)
obj.foo = 123
self.assertEqual(set(['foo']), obj.obj_what_changed())
primitive = obj.obj_to_primitive()
self.assertIn('watcher_object.changes', primitive)
obj2 = MyObj.obj_from_primitive(primitive)
self.assertEqual(set(['foo']), obj2.obj_what_changed())
obj2.obj_reset_changes()
self.assertEqual(set(), obj2.obj_what_changed())
def test_unknown_objtype(self):
self.assertRaises(object_exception.UnsupportedObjectError,
base.WatcherObject.obj_class_from_name, 'foo', '1.0')
def test_with_alternate_context(self):
ctxt1 = context.RequestContext('foo', 'foo')
ctxt2 = context.RequestContext(user='alternate')
obj = MyObj.query(ctxt1)
obj.update_test(ctxt2)
self.assertEqual('alternate-context', obj.bar)
def test_orphaned_object(self):
obj = MyObj.query(self.context)
obj._context = None
self.assertRaises(object_exception.OrphanedObjectError,
obj.update_test)
def test_changed_1(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(set(['foo']), obj.obj_what_changed())
obj.update_test(self.context)
self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
self.assertEqual(123, obj.foo)
def test_changed_2(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(set(['foo']), obj.obj_what_changed())
obj.save()
self.assertEqual(set([]), obj.obj_what_changed())
self.assertEqual(123, obj.foo)
def test_changed_3(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(set(['foo']), obj.obj_what_changed())
obj.refresh()
self.assertEqual(set([]), obj.obj_what_changed())
self.assertEqual(321, obj.foo)
self.assertEqual('refreshed', obj.bar)
def test_changed_4(self):
obj = MyObj.query(self.context)
obj.bar = 'something'
self.assertEqual(set(['bar']), obj.obj_what_changed())
obj.modify_save_modify(self.context)
self.assertEqual(set(['foo']), obj.obj_what_changed())
self.assertEqual(42, obj.foo)
self.assertEqual('meow', obj.bar)
def test_static_result(self):
obj = MyObj.query(self.context)
self.assertEqual('bar', obj.bar)
result = obj.marco()
self.assertEqual('polo', result)
def test_updates(self):
obj = MyObj.query(self.context)
self.assertEqual(1, obj.foo)
obj.update_test()
self.assertEqual('updated', obj.bar)
def test_base_attributes(self):
dt = datetime.datetime(1955, 11, 5, 0, 0, tzinfo=iso8601.UTC)
datatime = fields.DateTimeField()
obj = MyObj(self.context)
obj.created_at = dt
obj.updated_at = dt
expected = {'watcher_object.name': 'MyObj',
'watcher_object.namespace': 'watcher',
'watcher_object.version': '1.5',
'watcher_object.changes':
['created_at', 'updated_at'],
'watcher_object.data':
{'created_at': datatime.stringify(dt),
'updated_at': datatime.stringify(dt),
}
}
actual = obj.obj_to_primitive()
# watcher_object.changes is built from a set and order is undefined
self.assertEqual(sorted(expected['watcher_object.changes']),
sorted(actual['watcher_object.changes']))
del expected[
'watcher_object.changes'], actual['watcher_object.changes']
self.assertEqual(expected, actual)
def test_contains(self):
obj = MyObj(self.context)
self.assertNotIn('foo', obj)
obj.foo = 1
self.assertIn('foo', obj)
self.assertNotIn('does_not_exist', obj)
def test_obj_attr_is_set(self):
obj = MyObj(self.context, foo=1)
self.assertTrue(obj.obj_attr_is_set('foo'))
self.assertFalse(obj.obj_attr_is_set('bar'))
self.assertRaises(AttributeError, obj.obj_attr_is_set, 'bang')
def test_get(self):
obj = MyObj(self.context, foo=1)
# Foo has value, should not get the default
self.assertEqual(obj.get('foo', 2), 1)
# Foo has value, should return the value without error
self.assertEqual(obj.get('foo'), 1)
# Bar is not loaded, so we should get the default
self.assertEqual(obj.get('bar', 'not-loaded'), 'not-loaded')
# Bar without a default should lazy-load
self.assertEqual(obj.get('bar'), 'loaded!')
# Bar now has a default, but loaded value should be returned
self.assertEqual(obj.get('bar', 'not-loaded'), 'loaded!')
# Invalid attribute should raise AttributeError
self.assertRaises(AttributeError, obj.get, 'nothing')
# ...even with a default
self.assertRaises(AttributeError, obj.get, 'nothing', 3)
def test_object_inheritance(self):
base_fields = (
list(base.WatcherObject.fields) +
list(base.WatcherPersistentObject.fields))
myobj_fields = ['foo', 'bar', 'missing'] + base_fields
myobj3_fields = ['new_field']
self.assertTrue(issubclass(WatcherTestSubclassedObject, MyObj))
self.assertEqual(len(myobj_fields), len(MyObj.fields))
self.assertEqual(set(myobj_fields), set(MyObj.fields.keys()))
self.assertEqual(len(myobj_fields) + len(myobj3_fields),
len(WatcherTestSubclassedObject.fields))
self.assertEqual(set(myobj_fields) | set(myobj3_fields),
set(WatcherTestSubclassedObject.fields.keys()))
def test_get_changes(self):
obj = MyObj(self.context)
self.assertEqual({}, obj.obj_get_changes())
obj.foo = 123
self.assertEqual({'foo': 123}, obj.obj_get_changes())
obj.bar = 'test'
self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes())
obj.obj_reset_changes()
self.assertEqual({}, obj.obj_get_changes())
def test_obj_fields(self):
@base.WatcherObjectRegistry.register_if(False)
class TestObj(base.WatcherPersistentObject, base.WatcherObject,
base.WatcherObjectDictCompat):
fields = {'foo': fields.IntegerField()}
obj_extra_fields = ['bar']
@property
def bar(self):
return 'this is bar'
obj = TestObj(self.context)
self.assertEqual(set(['created_at', 'updated_at', 'deleted_at',
'foo', 'bar']),
set(obj.obj_fields))
def test_refresh_object(self):
@base.WatcherObjectRegistry.register_if(False)
class TestObj(base.WatcherPersistentObject, base.WatcherObject,
base.WatcherObjectDictCompat):
fields = {'foo': fields.IntegerField(),
'bar': fields.StringField()}
obj = TestObj(self.context)
current_obj = TestObj(self.context)
obj.foo = 10
obj.bar = 'obj.bar'
current_obj.foo = 2
current_obj.bar = 'current.bar'
obj.obj_refresh(current_obj)
self.assertEqual(obj.foo, 2)
self.assertEqual(obj.bar, 'current.bar')
def test_obj_constructor(self):
obj = MyObj(self.context, foo=123, bar='abc')
self.assertEqual(123, obj.foo)
self.assertEqual('abc', obj.bar)
self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
def test_assign_value_without_DictCompat(self):
class TestObj(base.WatcherObject):
fields = {'foo': fields.IntegerField(),
'bar': fields.StringField()}
obj = TestObj(self.context)
obj.foo = 10
err_message = ''
try:
obj['bar'] = 'value'
except TypeError as e:
err_message = str(e)
finally:
self.assertIn("'TestObj' object does not support item assignment",
err_message)
class TestObject(_LocalTest, _TestObject):
pass
# The hashes help developers check whether a change to an object needs a
# version bump. Each is an md5 hash of the object's fields and remotable
# methods. The fingerprint values should only be changed if there is a
# version bump.
expected_object_fingerprints = {
'Goal': '1.0-93881622db05e7b67a65ca885b4a022e',
'Strategy': '1.1-73f164491bdd4c034f48083a51bdeb7b',
'AuditTemplate': '1.1-b291973ffc5efa2c61b24fe34fdccc0b',
'Audit': '1.7-19bc991c0b048263df021a36c8624f4d',
'ActionPlan': '2.2-3331270cb3666c93408934826d03c08d',
'Action': '2.0-1dd4959a7e7ac30c62ef170fe08dd935',
'EfficacyIndicator': '1.0-655b71234a82bc7478aff964639c4bb0',
'ScoringEngine': '1.0-4abbe833544000728e17bd9e83f97576',
'Service': '1.0-4b35b99ada9677a882c9de2b30212f35',
'MyObj': '1.5-23c516d1e842f365f694e688d34e47c3',
'ActionDescription': '1.0-5761a3d16651046e7a0c357b57a6583e'
}
def get_watcher_objects():
"""Get Watcher versioned objects
This returns a dict of versioned objects that are in the Watcher
project namespace only, i.e. it excludes objects from os-vif and
other third-party modules.
:return: a dict mapping class names to lists of versioned objects
"""
all_classes = base.WatcherObjectRegistry.obj_classes()
watcher_classes = {}
for name in all_classes:
objclasses = all_classes[name]
if (objclasses[0].OBJ_PROJECT_NAMESPACE !=
base.WatcherObject.OBJ_PROJECT_NAMESPACE):
continue
watcher_classes[name] = objclasses
return watcher_classes
class TestObjectVersions(test_base.TestCase):
def test_object_version_check(self):
classes = base.WatcherObjectRegistry.obj_classes()
checker = object_fixture.ObjectVersionChecker(obj_classes=classes)
# Compute the difference between the actual fingerprints and the
# expected fingerprints. expect == actual == {} if there is no change.
expect, actual = checker.test_hashes(expected_object_fingerprints)
self.assertEqual(expect, actual,
"Some objects fields or remotable methods have been "
"modified. Please make sure the version of those "
"objects have been bumped and then update "
"expected_object_fingerprints with the new hashes. ")
class TestObjectSerializer(test_base.TestCase):
def test_object_serialization(self):
ser = base.WatcherObjectSerializer()
obj = MyObj(self.context)
primitive = ser.serialize_entity(self.context, obj)
self.assertIn('watcher_object.name', primitive)
obj2 = ser.deserialize_entity(self.context, primitive)
self.assertIsInstance(obj2, MyObj)
self.assertEqual(self.context, obj2._context)
def test_object_serialization_iterables(self):
ser = base.WatcherObjectSerializer()
obj = MyObj(self.context)
for iterable in (list, tuple, set):
thing = iterable([obj])
primitive = ser.serialize_entity(self.context, thing)
self.assertEqual(1, len(primitive))
for item in primitive:
self.assertFalse(isinstance(item, base.WatcherObject))
thing2 = ser.deserialize_entity(self.context, primitive)
self.assertEqual(1, len(thing2))
for item in thing2:
self.assertIsInstance(item, MyObj)
@mock.patch('watcher.objects.base.WatcherObject.indirection_api')
def _test_deserialize_entity_newer(self, obj_version, backported_to,
mock_indirection_api,
my_version='1.6'):
ser = base.WatcherObjectSerializer()
mock_indirection_api.object_backport_versions.return_value \
= 'backported'
@base.WatcherObjectRegistry.register
class MyTestObj(MyObj):
VERSION = my_version
obj = MyTestObj(self.context)
obj.VERSION = obj_version
primitive = obj.obj_to_primitive()
result = ser.deserialize_entity(self.context, primitive)
if backported_to is None:
self.assertFalse(
mock_indirection_api.object_backport_versions.called)
else:
self.assertEqual('backported', result)
versions = object_base.obj_tree_get_versions('MyTestObj')
mock_indirection_api.object_backport_versions.assert_called_with(
self.context, primitive, versions)
def test_deserialize_entity_newer_version_backports(self):
"Test object with unsupported (newer) version"
self._test_deserialize_entity_newer('1.25', '1.6')
def test_deserialize_entity_same_revision_does_not_backport(self):
"Test object with supported revision"
self._test_deserialize_entity_newer('1.6', None)
def test_deserialize_entity_newer_revision_does_not_backport_zero(self):
"Test object with supported revision"
self._test_deserialize_entity_newer('1.6.0', None)
def test_deserialize_entity_newer_revision_does_not_backport(self):
"Test object with supported (newer) revision"
self._test_deserialize_entity_newer('1.6.1', None)
def test_deserialize_entity_newer_version_passes_revision(self):
"Test object with unsupported (newer) version and revision"
self._test_deserialize_entity_newer('1.7', '1.6.1', my_version='1.6.1')
class TestRegistry(test_base.TestCase):
@mock.patch('watcher.objects.base.objects')
def test_hook_chooses_newer_properly(self, mock_objects):
reg = base.WatcherObjectRegistry()
reg.registration_hook(MyObj, 0)
class MyNewerObj(object):
VERSION = '1.123'
@classmethod
def obj_name(cls):
return 'MyObj'
self.assertEqual(MyObj, mock_objects.MyObj)
reg.registration_hook(MyNewerObj, 0)
self.assertEqual(MyNewerObj, mock_objects.MyObj)
@mock.patch('watcher.objects.base.objects')
def test_hook_keeps_newer_properly(self, mock_objects):
reg = base.WatcherObjectRegistry()
reg.registration_hook(MyObj, 0)
class MyOlderObj(object):
VERSION = '1.1'
@classmethod
def obj_name(cls):
return 'MyObj'
self.assertEqual(MyObj, mock_objects.MyObj)
reg.registration_hook(MyOlderObj, 0)
self.assertEqual(MyObj, mock_objects.MyObj)
|
py | b40e355d05a965481b941981cde093d79d24a19b | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# stateflow documentation build configuration file, created by
# sphinx-quickstart on Fri Jul 27 19:57:38 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# sys.path.insert(0, os.path.abspath('..'))
path = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(path, '..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'stateflow'
copyright = u'2018, Tomasz Łakota'
author = u'Tomasz Łakota'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
# html_sidebars = {
# '**': [
# 'about.html',
# 'navigation.html',
# 'relations.html', # needs 'show_related': True theme option to display
# 'searchbox.html',
# 'donate.html',
# ]
# }
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'stateflowdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'stateflow.tex', 'stateflow Documentation',
u'Tomasz Łakota', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'stateflow', 'stateflow Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'stateflow', 'stateflow Documentation',
author, 'stateflow', 'One line description of project.',
'Miscellaneous'),
]
|
py | b40e35b00ea902311f95b26d98ee708f226934f9 | import tensorflow.keras.backend as K
import tensorflow as tf
def mae(y_true, y_pred):
"""
Mean absolute error regression loss.
Args:
y_true ([np.array]): test samples
y_pred ([np.array]): predicted samples
Returns:
[float]: mean absolute error
"""
return K.mean(K.abs(y_pred - y_true), axis=-1)
def mse(y_true, y_pred):
"""
Mean squared error regression loss.
Args:
y_true ([np.array]): test samples
y_pred ([np.array]): predicted samples
Returns:
[float]: mean squared error
"""
return K.mean(K.square(y_pred - y_true), axis=-1)
def mape(y_true, y_pred):
"""
Mean absolute percentage error regression loss.
Args:
y_true ([np.array]): test samples
y_pred ([np.array]): predicted samples
Returns:
[float]: mean absolute percentage error
"""
diff = K.abs((y_true - y_pred) / K.clip(K.abs(y_true), K.epsilon(), None))
return 100. * K.mean(diff, axis=-1)
def msle(y_true, y_pred):
"""
Mean squared logarithmic error regression loss.
Args:
y_true ([np.array]): test samples
y_pred ([np.array]): predicted samples
Returns:
[float]: mean squared logarithmic error
"""
first_log = K.log(K.clip(y_pred, K.epsilon(), None) + 1.)
second_log = K.log(K.clip(y_true, K.epsilon(), None) + 1.)
return K.mean(K.square(first_log - second_log), axis=-1)
def r2(y_true, y_pred):
"""
:math:`R^2` (coefficient of determination) regression score function.
Best possible score is 1.0, lower values are worse.
Args:
y_true ([np.array]): test samples
y_pred ([np.array]): predicted samples
Returns:
[float]: R2
"""
SS_res = tf.reduce_sum(tf.square(y_true - y_pred), axis=-1)
SS_tot = tf.reduce_sum(tf.square(y_true - tf.reduce_mean(y_true, axis=-1)), axis=-1)
return (1 - SS_res/(SS_tot + tf.keras.backend.epsilon()))
def adj_r2(y_true, y_pred):
"""
Adjusted R2 regression score function with default inputs.
Best possible score is 1.0, lower values are worse.
Args:
y_true ([np.array]): test samples
y_pred ([np.array]): predicted samples
Returns:
[float]: adjusted R2
"""
SS_res = tf.reduce_sum(tf.square(y_true - y_pred), axis=-1)
SS_tot = tf.reduce_sum(tf.square(y_true - tf.reduce_mean(y_true, axis=-1)), axis=-1)
return (1 - SS_res/(SS_tot + tf.keras.backend.epsilon())) * (1 - (1 - r2(y_true, y_pred)) * (tf.cast(tf.size(y_true), tf.float32) - 1) / (tf.cast(tf.size(y_true), tf.float32) - tf.cast(tf.rank(y_true), tf.float32) - 1))
# SS_res = tf.reduce_sum(tf.square(y_true - y_pred), axis=-1)
# SS_tot = tf.reduce_sum(tf.square(y_true - tf.reduce_mean(y_true, axis=-1)), axis=-1)
# adj_SS_res = tf.cast(SS_res / (K.shape(y_true)[0] - 1), tf.int32)
# adj_SS_tot = tf.cast(SS_tot / (K.shape(y_true)[0] - 1), tf.int32)
# return (1 - adj_SS_res/(adj_SS_tot + tf.keras.backend.epsilon()))
def rmsle(y_true, y_pred):
"""
Root Mean Squared Logarithm Error
Args:
y_true ([np.array]): test samples
y_pred ([np.array]): predicted samples
Returns:
[float]: root mean squared logarithm error
"""
first_log = K.log(K.clip(y_pred, K.epsilon(), None) + 1.)
second_log = K.log(K.clip(y_true, K.epsilon(), None) + 1.)
return K.sqrt(K.mean(K.square(first_log - second_log), axis=-1))
def rmse(y_true, y_pred):
"""
Root Mean Squared Error
Args:
y_true ([np.array]): test samples
y_pred ([np.array]): predicted samples
Returns:
[float]: root mean squared error
"""
return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))
def smape(y_true, y_pred):
"""
Symmetric mean absolute percentage error regression loss.
Args:
y_true ([np.array]): test samples
y_pred ([np.array]): predicted samples
Returns:
[float]: symmetric mean absolute percentage error
"""
diff = K.abs((y_true - y_pred) / K.clip(K.abs(y_true), K.epsilon(), None))
return 100. * K.mean(K.mean(diff, axis=-1))
def smape_log(y_true, y_pred):
"""
Symmetric mean absolute percentage error regression loss.
Args:
y_true ([np.array]): test samples
y_pred ([np.array]): predicted samples
Returns:
[float]: symmetric mean absolute percentage error
"""
diff = K.abs((y_true - y_pred) / K.clip(K.abs(y_true), K.epsilon(), None))
return K.log(K.mean(K.mean(diff, axis=-1)))
def nrmse(y_true, y_pred):
"""
Normalized Root Mean Squared Error
Args:
y_true ([np.array]): test samples
y_pred ([np.array]): predicted samples
Returns:
[float]: normalized root mean squared error
"""
return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1)) / K.mean(K.abs(y_true), axis=-1)
|
py | b40e3603453e0c1f692595a09b56a43b21d1782d | #!python
# raw2csv
# Translates SPICE3 raw data into CSV file format.
# Used, for example, in conjunction with ngSpice simulator output data.
#
# > raw2csv.py [-o <output_file.csv>] [-t] <input_raw_file>
#
# -o : output csv file. Default is "out.csv" in the current directory
# -t : transpose output. By default data is aligned in column
# <input_raw_file>: mandatory input file
#
# Under MIT License (MIT)
# Copyright (C) 2021 Gabriele Bellini <Switzerland>
import argparse
import sys
import os
import time
parser = argparse.ArgumentParser(description='Translate SPICE3 raw data into CSV file format')
parser.add_argument('-t',
action='store_true',
dest='t',
help='transpose data'
)
parser.add_argument('-o',
default='out.csv',
dest='outfile',
help='output CSV filename (default is out.csv)',
type=str
)
parser.add_argument('inputfile',
help='input raw file (mandatory)',
type=str
)
args=parser.parse_args()
transpose=args.t
starttime = time.time()
print('Input file : ' + args.inputfile)
print('Output file : ' + args.outfile)
result = []
result_line = []
line_splitted = []
f = open(args.inputfile, 'r')
while True:
currline = f.readline()
if ("" == currline):
break
if (currline.find('No. Variables:') >= 0): # search for nb of variables
line_splitted = currline.split(':')
nb_variables = line_splitted[1].rstrip('\n')
print('found '+nb_variables+' variables')
break
while True:
currline = f.readline()
if ("" == currline):
break
if (currline.find('Variables:') >= 0):
#print('Variables')
break
for i in range(int(nb_variables)):
currline = f.readline()
if ("" == currline):
break
currline = currline.strip('\t')
currline = currline.rstrip('\n')
result_line = (currline.split('\t'))
result.append(result_line)
while True:
currline = f.readline()
if ("" == currline):
break
if (currline.find('Values:') >= 0):
print('Processing values')
break
endoffile=0
while True:
for i in range(int(nb_variables)):
currline = f.readline()
if ("" == currline):
endoffile=1
break
if len(currline.strip())>0: # ignore empty lines
#print(currline)
line_splitted = currline.split('\t')
result[i].append(line_splitted[-1].rstrip('\n')) # take last/actual value and cleanup newline
if (endoffile>0):
break
f.close()
new_result=[]
##############################
# Transpose ?
if (transpose):
new_result = [[result[j][i] for j in range(len(result))] for i in range(len(result[0]))]
else:
new_result = result
##############################
# Write result to file
f = open(args.outfile, 'w')
for n in new_result:
s=",".join(n)
s=s+"\n"
#print(s)
f.write(s)
f.close()
#for n in new_result:
# print(n)
endtime = time.time()
print('Execution time in seconds: ', (endtime-starttime))
|
py | b40e37204307bc2f548ea2e9999a811d6f0d715e | """Pytest configuration module."""
from unittest.mock import patch
import pytest
from flake8_koles.checker import KolesChecker
@pytest.fixture
def koles_checker():
"""Return clean KolesChecker instance."""
with patch('flake8_koles.checker.KolesChecker.__init__') as mock_init:
mock_init.return_value = None
koles_checker = KolesChecker('test_tree', 'test_filename')
yield koles_checker
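# A hedged usage sketch (the test name and assertion are illustrative only):
#
#   def test_checker_instance(koles_checker):
#       assert isinstance(koles_checker, KolesChecker)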
|
py | b40e3748ab96a64c069a1b046d22c44303ada73c | import math
fileIn = "DieAssembly.xic"
fileOut = "print.eps"
size = 8*72 # Final size of postcript output page in pt
psdigits = 10.
###############################################
## Determine scaling factor from bounding box
###############################################
fxic = open(fileIn,"r")
firstloop = True
for line in fxic:
args=line.rstrip().rstrip(';').split() #strip newline then split
if args[0]=='P':
# Extract coordinate pairs and scale
coords = []
for i in range((len(args)-1)/2):
coords.append((int(args[2*i+1]),int(args[2*i+2])))
# Find bounding box
if firstloop:
xboxmin = xboxmax = coords[0][0]
yboxmin = yboxmax = coords[0][1]
firstloop = False
for x,y in coords:
xboxmin = min(xboxmin,x)
xboxmax = max(xboxmax,x)
yboxmin = min(yboxmin,y)
yboxmax = max(yboxmax,y)
fxic.close()
# Scale factor
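# (Coordinates are written in 1/psdigits-pt units and scaled back by
# 1/psdigits at output time, so the largest bounding-box dimension ends
# up spanning `size` points on the page.)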
scale = psdigits*size/float(max(xboxmax-xboxmin,yboxmax-yboxmin))
################################
## Determine shared edges
################################
fxic = open(fileIn,"r")
layer = 'none'
edgeList = {}
for line in fxic:
args=line.rstrip().rstrip(';').split() #strip newline then split
if args[0]=='L':
layer = args[1]
elif args[0]=='P':
# Extract coordinate pairs and scale
coords = []
for i in range((len(args)-1)/2):
coords.append((int(scale*(int(args[2*i+1])-xboxmin)),int(scale*(int(args[2*i+2])-yboxmin))))
# Record all edges with their layer name.
(xp,yp) = coords[len(coords)-1]
for x,y in coords:
key1 = (layer,xp,yp,x,y)
key2 = (layer,x,y,xp,yp)
if edgeList.has_key(key1):
edgeList[key1] = 1 # Edge is shared
elif edgeList.has_key(key2):
edgeList[key2] = 1
else:
edgeList[key1] = 0 # Create key
xp = x
yp = y
fxic.close()
################################
## Output geometry to postscript
################################
fxic = open(fileIn,"r")
psfile = open(fileOut,"w")
psfile.write("%!\n")
psfile.write("%%%%BoundingBox: 0 0 %i %i\n" % (int((xboxmax-xboxmin)*scale/psdigits),int((yboxmax-yboxmin)*scale/psdigits)))
psfile.write("1 setlinewidth\n")
psfile.write("%f %f scale\n" % (1./psdigits, 1./psdigits))
layer = 'none'
for line in fxic:
args=line.rstrip().rstrip(';').split() #strip newline then split
if args[0]=='L':
layer = args[1]
if layer=='MTL1':
color = ( 140./255., 10./255., 10./255. ) # RGB
fillpattern = 1 # 0-none, 1-lines
fillcolor = ( 0.8, 0.37, 0.1 ) # RGB
fillangle = -45
fillspace = 0.05*psdigits*72
elif layer=='MTL2':
color = ( 30./255., 128./255., 46./255. ) # RGB
fillpattern = 1 # 0-none, 1-lines
fillcolor = ( 0.5, 0.8, 0 ) # RGB
fillangle = 45
fillspace = 0.05*psdigits*72
elif layer=='OCUT':
color = ( 46./255. , 30./255., 128./255. ) # RGB
fillpattern = 1 # 0-none, 1-lines
fillcolor = ( 0, 0.5, 0.8 ) # RGB
fillangle = 45
fillspace = 0.025*psdigits*72
else:
color = ( 0., 0., 0. ) # RGB
fillpattern = 0 # 0-none, 1-lines
psfile.write("%f %f %f setrgbcolor\n" % color)
elif args[0]=='P':
# Extract coordinate pairs and scale
coords = []
for i in range((len(args)-1)/2):
coords.append((int(scale*(int(args[2*i+1])-xboxmin)),int(scale*(int(args[2*i+2])-yboxmin))))
# Draw unshared boundary edges
(xp,yp) = coords[len(coords)-1]
for x,y in coords:
key1 = (layer,xp,yp,x,y)
key2 = (layer,x,y,xp,yp)
if edgeList.has_key(key1) and edgeList[key1]==0:
psfile.write("newpath %i %i moveto %i %i lineto stroke\n" \
% (xp,yp,x,y))
elif edgeList.has_key(key2) and edgeList[key2]==0:
psfile.write("newpath %i %i moveto %i %i lineto stroke\n" \
% (xp,yp,x,y))
xp = x
yp = y
# Find bounding box for hatch fills
xmin = xmax = coords[0][0]
ymin = ymax = coords[0][1]
for x,y in coords:
xmin = min(xmin,x)
xmax = max(xmax,x)
ymin = min(ymin,y)
ymax = max(ymax,y)
# Draw hatching using a clipping path
if fillpattern != 0:
psfile.write("gsave %f %f %f setrgbcolor\n" % fillcolor)
psfile.write("newpath %i %i moveto\n" % (coords[0][0], coords[0][1]))
for x,y in coords[1:]:
psfile.write("%i %i lineto\n" % (x, y))
psfile.write("closepath clip\n")
if fillpattern==1 and fillangle==-45:
imin = int(math.floor((xmin+ymin)/fillspace/math.sqrt(2)))
imax = int(math.ceil((xmax+ymax)/fillspace/math.sqrt(2)))
for i in range(imin,imax+1):
xa = int(i*fillspace*math.sqrt(2))-ymin
xb = xa-(ymax-ymin)
psfile.write("newpath %i %i moveto %i %i lineto stroke\n" \
% (xa,ymin,xb,ymax))
elif fillpattern==1 and fillangle==45:
imin = int(math.floor((xmin-ymax)/fillspace/math.sqrt(2)))
imax = int(math.ceil((xmax-ymin)/fillspace/math.sqrt(2)))
for i in range(imin,imax+1):
xa = int(i*fillspace*math.sqrt(2))+ymax
xb = xa-(ymax-ymin)
psfile.write("newpath %i %i moveto %i %i lineto stroke\n" \
% (xa,ymax,xb,ymin))
if fillpattern != 0:
psfile.write("grestore\n")
fxic.close()
psfile.close()
|
py | b40e376f4b8816f8af731886e7d994bb818134d8 | from django.conf import settings
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import filters, permissions, viewsets
from ..models import SoggettoFiscale
from ..serializers import SoggettoFiscaleSerializer
permission = permissions.AllowAny if settings.DEBUG else permissions.IsAuthenticated
class SoggettoFiscaleViewSet(viewsets.ModelViewSet):
permission_classes = [permission]
queryset = SoggettoFiscale.objects.all()
serializer_class = SoggettoFiscaleSerializer
filter_backends = [filters.SearchFilter, DjangoFilterBackend]
filterset_fields = ["utenti"]
search_fields = [
"denominazione",
"nome",
"cognome",
"id_fiscale_iva_codice",
"codice_fiscale",
]
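# Example queries this viewset is meant to serve (the URL prefix depends on
# how the router registers it, so the paths below are illustrative):
#   GET /soggetti-fiscali/?utenti=3
#   GET /soggetti-fiscali/?search=Rossi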
|
py | b40e379407834c2a22e3abbdd8ce6c05dab38956 | from .app import App, mainApp
from .entity import Entity
from .tools import background, calcFunction
def init():
global mainApp
if not mainApp:
mainApp = App()
return mainApp
|
py | b40e383aa3db8a344801355c2b78cf08717fef71 | """
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from cryptoapis.exceptions import ApiAttributeError
def lazy_import():
from cryptoapis.model.get_transaction_details_by_transaction_idribsbsc_gas_price import GetTransactionDetailsByTransactionIDRIBSBSCGasPrice
globals()['GetTransactionDetailsByTransactionIDRIBSBSCGasPrice'] = GetTransactionDetailsByTransactionIDRIBSBSCGasPrice
class GetTransactionDetailsByTransactionIDRIBSBSC(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'contract': (str,), # noqa: E501
'gas_limit': (str,), # noqa: E501
'gas_price': (GetTransactionDetailsByTransactionIDRIBSBSCGasPrice,), # noqa: E501
'gas_used': (str,), # noqa: E501
'input_data': (str,), # noqa: E501
'nonce': (int,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'contract': 'contract', # noqa: E501
'gas_limit': 'gasLimit', # noqa: E501
'gas_price': 'gasPrice', # noqa: E501
'gas_used': 'gasUsed', # noqa: E501
'input_data': 'inputData', # noqa: E501
'nonce': 'nonce', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, contract, gas_limit, gas_price, gas_used, input_data, nonce, *args, **kwargs): # noqa: E501
"""GetTransactionDetailsByTransactionIDRIBSBSC - a model defined in OpenAPI
Args:
contract (str): Represents the specific transaction contract
gas_limit (str): Represents the amount of gas used by this specific transaction alone.
gas_price (GetTransactionDetailsByTransactionIDRIBSBSCGasPrice):
gas_used (str): Defines the unit of the gas price amount, e.g. BTC, ETH, XRP.
input_data (str): Represents additional information that is required for the transaction.
nonce (int): Represents the sequential running number for an address, starting from 0 for the first transaction. E.g., if the nonce of a transaction is 10, it would be the 11th transaction sent from the sender's address.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.contract = contract
self.gas_limit = gas_limit
self.gas_price = gas_price
self.gas_used = gas_used
self.input_data = input_data
self.nonce = nonce
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, contract, gas_limit, gas_price, gas_used, input_data, nonce, *args, **kwargs): # noqa: E501
"""GetTransactionDetailsByTransactionIDRIBSBSC - a model defined in OpenAPI
Args:
contract (str): Represents the specific transaction contract
gas_limit (str): Represents the amount of gas used by this specific transaction alone.
gas_price (GetTransactionDetailsByTransactionIDRIBSBSCGasPrice):
gas_used (str): Defines the unit of the gas price amount, e.g. BTC, ETH, XRP.
input_data (str): Represents additional information that is required for the transaction.
nonce (int): Represents the sequential running number for an address, starting from 0 for the first transaction. E.g., if the nonce of a transaction is 10, it would be the 11th transaction sent from the sender's address.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.contract = contract
self.gas_limit = gas_limit
self.gas_price = gas_price
self.gas_used = gas_used
self.input_data = input_data
self.nonce = nonce
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
py | b40e3905395ef71547f0e2df063ffe40db061eac | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Vendetta Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test behavior of headers messages to announce blocks.
Setup:
- Two nodes:
- node0 is the node-under-test. We create two p2p connections to it. The
first p2p connection is a control and should only ever receive inv's. The
second p2p connection tests the headers sending logic.
- node1 is used to create reorgs.
test_null_locators
==================
Sends two getheaders requests with null locator values. First request's hashstop
value refers to validated block, while second request's hashstop value refers to
a block which hasn't been validated. Verifies only the first request returns
headers.
test_nonnull_locators
=====================
Part 1: No headers announcements before "sendheaders"
a. node mines a block [expect: inv]
send getdata for the block [expect: block]
b. node mines another block [expect: inv]
send getheaders and getdata [expect: headers, then block]
c. node mines another block [expect: inv]
peer mines a block, announces with header [expect: getdata]
d. node mines another block [expect: inv]
Part 2: After "sendheaders", headers announcements should generally work.
a. peer sends sendheaders [expect: no response]
peer sends getheaders with current tip [expect: no response]
b. node mines a block [expect: tip header]
c. for N in 1, ..., 10:
* for announce-type in {inv, header}
- peer mines N blocks, announces with announce-type
[ expect: getheaders/getdata or getdata, deliver block(s) ]
- node mines a block [ expect: 1 header ]
Part 3: Headers announcements stop after large reorg and resume after getheaders or inv from peer.
- For response-type in {inv, getheaders}
* node mines a 7 block reorg [ expect: headers announcement of 8 blocks ]
* node mines an 8-block reorg [ expect: inv at tip ]
* peer responds with getblocks/getdata [expect: inv, blocks ]
* node mines another block [ expect: inv at tip, peer sends getdata, expect: block ]
* node mines another block at tip [ expect: inv ]
* peer responds with getheaders with an old hashstop more than 8 blocks back [expect: headers]
* peer requests block [ expect: block ]
* node mines another block at tip [ expect: inv, peer sends getdata, expect: block ]
* peer sends response-type [expect headers if getheaders, getheaders/getdata if mining new block]
* node mines 1 block [expect: 1 header, peer responds with getdata]
Part 4: Test direct fetch behavior
a. Announce 2 old block headers.
Expect: no getdata requests.
b. Announce 3 new blocks via 1 headers message.
Expect: one getdata request for all 3 blocks.
(Send blocks.)
c. Announce 1 header that forks off the last two blocks.
Expect: no response.
d. Announce 1 more header that builds on that fork.
Expect: one getdata request for two blocks.
e. Announce 16 more headers that build on that fork.
Expect: getdata request for 14 more blocks.
f. Announce 1 more header that builds on that fork.
Expect: no response.
Part 5: Test handling of headers that don't connect.
a. Repeat 10 times:
1. Announce a header that doesn't connect.
Expect: getheaders message
2. Send headers chain.
Expect: getdata for the missing blocks, tip update.
b. Then send 9 more headers that don't connect.
Expect: getheaders message each time.
c. Announce a header that does connect.
Expect: no response.
d. Announce 49 headers that don't connect.
Expect: getheaders message each time.
e. Announce one more that doesn't connect.
Expect: disconnect.
"""
from test_framework.blocktools import create_block, create_coinbase
from test_framework.messages import CInv
from test_framework.mininode import (
CBlockHeader,
NODE_WITNESS,
P2PInterface,
mininode_lock,
msg_block,
msg_getblocks,
msg_getdata,
msg_getheaders,
msg_headers,
msg_inv,
msg_sendheaders,
)
from test_framework.test_framework import VendettaTestFramework
from test_framework.util import (
assert_equal,
sync_blocks,
wait_until,
)
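# Maximum time (in seconds) we allow the node to take to answer a headers
# announcement with a getdata when the blocks qualify for direct fetch
# (exercised in Part 4 below).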
DIRECT_FETCH_RESPONSE_TIME = 0.05
class BaseNode(P2PInterface):
def __init__(self):
super().__init__()
self.block_announced = False
self.last_blockhash_announced = None
self.recent_headers_announced = []
def send_get_data(self, block_hashes):
"""Request data for a list of block hashes."""
msg = msg_getdata()
for x in block_hashes:
msg.inv.append(CInv(2, x))
self.send_message(msg)
def send_get_headers(self, locator, hashstop):
msg = msg_getheaders()
msg.locator.vHave = locator
msg.hashstop = hashstop
self.send_message(msg)
def send_block_inv(self, blockhash):
msg = msg_inv()
msg.inv = [CInv(2, blockhash)]
self.send_message(msg)
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [CBlockHeader(b) for b in new_blocks]
self.send_message(headers_message)
def send_getblocks(self, locator):
getblocks_message = msg_getblocks()
getblocks_message.locator.vHave = locator
self.send_message(getblocks_message)
def wait_for_getdata(self, hash_list, timeout=60):
if hash_list == []:
return
test_function = lambda: "getdata" in self.last_message and [x.hash for x in self.last_message["getdata"].inv] == hash_list
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_block_announcement(self, block_hash, timeout=60):
test_function = lambda: self.last_blockhash_announced == block_hash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def on_inv(self, message):
self.block_announced = True
self.last_blockhash_announced = message.inv[-1].hash
def on_headers(self, message):
if len(message.headers):
self.block_announced = True
for x in message.headers:
x.calc_sha256()
# append because headers may be announced over multiple messages.
self.recent_headers_announced.append(x.sha256)
self.last_blockhash_announced = message.headers[-1].sha256
def clear_block_announcements(self):
with mininode_lock:
self.block_announced = False
self.last_message.pop("inv", None)
self.last_message.pop("headers", None)
self.recent_headers_announced = []
def check_last_headers_announcement(self, headers):
"""Test whether the last headers announcements received are right.
Headers may be announced across more than one message."""
test_function = lambda: (len(self.recent_headers_announced) >= len(headers))
wait_until(test_function, timeout=60, lock=mininode_lock)
with mininode_lock:
assert_equal(self.recent_headers_announced, headers)
self.block_announced = False
self.last_message.pop("headers", None)
self.recent_headers_announced = []
def check_last_inv_announcement(self, inv):
"""Test whether the last announcement received had the right inv.
inv should be a list of block hashes."""
test_function = lambda: self.block_announced
wait_until(test_function, timeout=60, lock=mininode_lock)
with mininode_lock:
compare_inv = []
if "inv" in self.last_message:
compare_inv = [x.hash for x in self.last_message["inv"].inv]
assert_equal(compare_inv, inv)
self.block_announced = False
self.last_message.pop("inv", None)
class SendHeadersTest(VendettaTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def mine_blocks(self, count):
"""Mine count blocks and return the new tip."""
# Clear out block announcements from each p2p listener
[x.clear_block_announcements() for x in self.nodes[0].p2ps]
self.nodes[0].generate(count)
return int(self.nodes[0].getbestblockhash(), 16)
def mine_reorg(self, length):
"""Mine a reorg that invalidates length blocks (replacing them with # length+1 blocks).
Note: we clear the state of our p2p connections after the
to-be-reorged-out blocks are mined, so that we don't break later tests.
return the list of block hashes newly mined."""
self.nodes[0].generate(length) # make sure all invalidated blocks are node0's
sync_blocks(self.nodes, wait=0.1)
for x in self.nodes[0].p2ps:
x.wait_for_block_announcement(int(self.nodes[0].getbestblockhash(), 16))
x.clear_block_announcements()
tip_height = self.nodes[1].getblockcount()
hash_to_invalidate = self.nodes[1].getblockhash(tip_height - (length - 1))
self.nodes[1].invalidateblock(hash_to_invalidate)
all_hashes = self.nodes[1].generate(length + 1) # Must be longer than the orig chain
sync_blocks(self.nodes, wait=0.1)
return [int(x, 16) for x in all_hashes]
def run_test(self):
# Setup the p2p connections
inv_node = self.nodes[0].add_p2p_connection(BaseNode())
# Make sure NODE_NETWORK is not set for test_node, so no block download
# will occur outside of direct fetching
test_node = self.nodes[0].add_p2p_connection(BaseNode(), services=NODE_WITNESS)
# Ensure verack's have been processed by our peer
inv_node.sync_with_ping()
test_node.sync_with_ping()
self.test_null_locators(test_node, inv_node)
self.test_nonnull_locators(test_node, inv_node)
def test_null_locators(self, test_node, inv_node):
tip = self.nodes[0].getblockheader(self.nodes[0].generate(1)[0])
tip_hash = int(tip["hash"], 16)
inv_node.check_last_inv_announcement(inv=[tip_hash])
test_node.check_last_inv_announcement(inv=[tip_hash])
self.log.info("Verify getheaders with null locator and valid hashstop returns headers.")
test_node.clear_block_announcements()
test_node.send_get_headers(locator=[], hashstop=tip_hash)
test_node.check_last_headers_announcement(headers=[tip_hash])
self.log.info("Verify getheaders with null locator and invalid hashstop does not return headers.")
block = create_block(int(tip["hash"], 16), create_coinbase(tip["height"] + 1), tip["mediantime"] + 1)
block.solve()
test_node.send_header_for_blocks([block])
test_node.clear_block_announcements()
test_node.send_get_headers(locator=[], hashstop=int(block.hash, 16))
test_node.sync_with_ping()
assert_equal(test_node.block_announced, False)
inv_node.clear_block_announcements()
test_node.send_message(msg_block(block))
inv_node.check_last_inv_announcement(inv=[int(block.hash, 16)])
def test_nonnull_locators(self, test_node, inv_node):
tip = int(self.nodes[0].getbestblockhash(), 16)
# PART 1
# 1. Mine a block; expect inv announcements each time
self.log.info("Part 1: headers don't start before sendheaders message...")
for i in range(4):
self.log.debug("Part 1.{}: starting...".format(i))
old_tip = tip
tip = self.mine_blocks(1)
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_inv_announcement(inv=[tip])
# Try a few different responses; none should affect next announcement
if i == 0:
# first request the block
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
elif i == 1:
# next try requesting header and block
test_node.send_get_headers(locator=[old_tip], hashstop=tip)
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
test_node.clear_block_announcements() # since we requested headers...
elif i == 2:
# this time announce own block via headers
inv_node.clear_block_announcements()
height = self.nodes[0].getblockcount()
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
block_time = last_time + 1
new_block = create_block(tip, create_coinbase(height + 1), block_time)
new_block.solve()
test_node.send_header_for_blocks([new_block])
test_node.wait_for_getdata([new_block.sha256])
test_node.send_message(msg_block(new_block))
test_node.sync_with_ping() # make sure this block is processed
wait_until(lambda: inv_node.block_announced, timeout=60, lock=mininode_lock)
inv_node.clear_block_announcements()
test_node.clear_block_announcements()
self.log.info("Part 1: success!")
self.log.info("Part 2: announce blocks with headers after sendheaders message...")
# PART 2
# 2. Send a sendheaders message and test that headers announcements
# commence and keep working.
test_node.send_message(msg_sendheaders())
prev_tip = int(self.nodes[0].getbestblockhash(), 16)
test_node.send_get_headers(locator=[prev_tip], hashstop=0)
test_node.sync_with_ping()
# Now that we've synced headers, headers announcements should work
tip = self.mine_blocks(1)
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_headers_announcement(headers=[tip])
height = self.nodes[0].getblockcount() + 1
block_time += 10 # Advance far enough ahead
for i in range(10):
self.log.debug("Part 2.{}: starting...".format(i))
# Mine i blocks, and alternate announcing either via
# inv (of tip) or via headers. After each, new blocks
# mined by the node should successfully be announced
# with block header, even though the blocks are never requested
for j in range(2):
self.log.debug("Part 2.{}.{}: starting...".format(i, j))
blocks = []
for b in range(i + 1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
if j == 0:
# Announce via inv
test_node.send_block_inv(tip)
test_node.wait_for_getheaders()
# Should have received a getheaders now
test_node.send_header_for_blocks(blocks)
# Test that duplicate inv's won't result in duplicate
# getdata requests, or duplicate headers announcements
[inv_node.send_block_inv(x.sha256) for x in blocks]
test_node.wait_for_getdata([x.sha256 for x in blocks])
inv_node.sync_with_ping()
else:
# Announce via headers
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks])
# Test that duplicate headers won't result in duplicate
# getdata requests (the check is further down)
inv_node.send_header_for_blocks(blocks)
inv_node.sync_with_ping()
[test_node.send_message(msg_block(x)) for x in blocks]
test_node.sync_with_ping()
inv_node.sync_with_ping()
# This block should not be announced to the inv node (since it also
# broadcast it)
assert "inv" not in inv_node.last_message
assert "headers" not in inv_node.last_message
tip = self.mine_blocks(1)
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_headers_announcement(headers=[tip])
height += 1
block_time += 1
self.log.info("Part 2: success!")
self.log.info("Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer...")
# PART 3. Headers announcements can stop after large reorg, and resume after
# getheaders or inv from peer.
for j in range(2):
self.log.debug("Part 3.{}: starting...".format(j))
# First try mining a reorg that can propagate with header announcement
new_block_hashes = self.mine_reorg(length=7)
tip = new_block_hashes[-1]
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_headers_announcement(headers=new_block_hashes)
block_time += 8
# Mine a too-large reorg, which should be announced with a single inv
new_block_hashes = self.mine_reorg(length=8)
tip = new_block_hashes[-1]
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_inv_announcement(inv=[tip])
block_time += 9
            fork_point = self.nodes[0].getblock("%064x" % new_block_hashes[0])["previousblockhash"]
fork_point = int(fork_point, 16)
# Use getblocks/getdata
test_node.send_getblocks(locator=[fork_point])
test_node.check_last_inv_announcement(inv=new_block_hashes)
test_node.send_get_data(new_block_hashes)
test_node.wait_for_block(new_block_hashes[-1])
for i in range(3):
self.log.debug("Part 3.{}.{}: starting...".format(j, i))
# Mine another block, still should get only an inv
tip = self.mine_blocks(1)
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_inv_announcement(inv=[tip])
if i == 0:
# Just get the data -- shouldn't cause headers announcements to resume
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
elif i == 1:
# Send a getheaders message that shouldn't trigger headers announcements
# to resume (best header sent will be too old)
test_node.send_get_headers(locator=[fork_point], hashstop=new_block_hashes[1])
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
elif i == 2:
# This time, try sending either a getheaders to trigger resumption
# of headers announcements, or mine a new block and inv it, also
# triggering resumption of headers announcements.
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
if j == 0:
test_node.send_get_headers(locator=[tip], hashstop=0)
test_node.sync_with_ping()
else:
test_node.send_block_inv(tip)
test_node.sync_with_ping()
# New blocks should now be announced with header
tip = self.mine_blocks(1)
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_headers_announcement(headers=[tip])
self.log.info("Part 3: success!")
self.log.info("Part 4: Testing direct fetch behavior...")
tip = self.mine_blocks(1)
height = self.nodes[0].getblockcount() + 1
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
block_time = last_time + 1
# Create 2 blocks. Send the blocks, then send the headers.
blocks = []
for b in range(2):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
inv_node.send_message(msg_block(blocks[-1]))
inv_node.sync_with_ping() # Make sure blocks are processed
test_node.last_message.pop("getdata", None)
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
# should not have received any getdata messages
with mininode_lock:
assert "getdata" not in test_node.last_message
# This time, direct fetch should work
blocks = []
for b in range(3):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=DIRECT_FETCH_RESPONSE_TIME)
[test_node.send_message(msg_block(x)) for x in blocks]
test_node.sync_with_ping()
# Now announce a header that forks the last two blocks
tip = blocks[0].sha256
height -= 1
blocks = []
# Create extra blocks for later
for b in range(20):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
# Announcing one block on fork should not trigger direct fetch
# (less work than tip)
test_node.last_message.pop("getdata", None)
test_node.send_header_for_blocks(blocks[0:1])
test_node.sync_with_ping()
with mininode_lock:
assert "getdata" not in test_node.last_message
# Announcing one more block on fork should trigger direct fetch for
# both blocks (same work as tip)
test_node.send_header_for_blocks(blocks[1:2])
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=DIRECT_FETCH_RESPONSE_TIME)
# Announcing 16 more headers should trigger direct fetch for 14 more
# blocks
test_node.send_header_for_blocks(blocks[2:18])
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=DIRECT_FETCH_RESPONSE_TIME)
# Announcing 1 more header should not trigger any response
test_node.last_message.pop("getdata", None)
test_node.send_header_for_blocks(blocks[18:19])
test_node.sync_with_ping()
with mininode_lock:
assert "getdata" not in test_node.last_message
self.log.info("Part 4: success!")
# Now deliver all those blocks we announced.
[test_node.send_message(msg_block(x)) for x in blocks]
self.log.info("Part 5: Testing handling of unconnecting headers")
# First we test that receipt of an unconnecting header doesn't prevent
# chain sync.
for i in range(10):
self.log.debug("Part 5.{}: starting...".format(i))
test_node.last_message.pop("getdata", None)
blocks = []
# Create two more blocks.
for j in range(2):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
# Send the header of the second block -> this won't connect.
with mininode_lock:
test_node.last_message.pop("getheaders", None)
test_node.send_header_for_blocks([blocks[1]])
test_node.wait_for_getheaders()
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks])
[test_node.send_message(msg_block(x)) for x in blocks]
test_node.sync_with_ping()
assert_equal(int(self.nodes[0].getbestblockhash(), 16), blocks[1].sha256)
blocks = []
# Now we test that if we repeatedly don't send connecting headers, we
# don't go into an infinite loop trying to get them to connect.
MAX_UNCONNECTING_HEADERS = 10
for j in range(MAX_UNCONNECTING_HEADERS + 1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
for i in range(1, MAX_UNCONNECTING_HEADERS):
# Send a header that doesn't connect, check that we get a getheaders.
with mininode_lock:
test_node.last_message.pop("getheaders", None)
test_node.send_header_for_blocks([blocks[i]])
test_node.wait_for_getheaders()
# Next header will connect, should re-set our count:
test_node.send_header_for_blocks([blocks[0]])
# Remove the first two entries (blocks[1] would connect):
blocks = blocks[2:]
# Now try to see how many unconnecting headers we can send
# before we get disconnected. Should be 5*MAX_UNCONNECTING_HEADERS
for i in range(5 * MAX_UNCONNECTING_HEADERS - 1):
# Send a header that doesn't connect, check that we get a getheaders.
with mininode_lock:
test_node.last_message.pop("getheaders", None)
test_node.send_header_for_blocks([blocks[i % len(blocks)]])
test_node.wait_for_getheaders()
# Eventually this stops working.
test_node.send_header_for_blocks([blocks[-1]])
# Should get disconnected
test_node.wait_for_disconnect()
self.log.info("Part 5: success!")
# Finally, check that the inv node never received a getdata request,
# throughout the test
assert "getdata" not in inv_node.last_message
if __name__ == '__main__':
SendHeadersTest().main()
|
py | b40e3940afdb9074cad1b4e36977c2758298a3bf | from extended_euclidean import chineseremaindertheorem
def openning_text(text_address):
"""Opens a text file"""
with open(text_address, "r") as text_file:
textr = text_file.readlines()
result = list()
for blocks in textr:
blocks = blocks.replace("\n", "")
blocks = blocks.split(" ")
result.append(blocks)
return result
def getting_massage(code):
"""decrypting massage out of text file"""
massage = ''
for block in code:
temp = ''
for letters in block:
temp += chr(int(letters) - 11)
massage += temp
if massage[-1] == "|":
for i in range(len(massage)-1, 0, -1):
if not massage[i] == "|":
massage = massage[:i + 1]
break
return massage
def decrypting_method(code, private_key):
"""Using decrypting method"""
d, n, p, q, dp, dq, qinv = private_key
d, n, p, q, dp, dq, qinv = int(d), int(n), int(p), int(q), int(dp), int(dq), int(qinv)
decrypted_code = list()
for blocks in code:
temp = []
for codes in blocks:
codes = int(codes)
temp.append(str(chineseremaindertheorem(dq, dp, p, q, qinv, codes)))
decrypted_code.append(temp)
return decrypted_code
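# Illustrative reference only (not called above): standard RSA-CRT decryption via
# Garner's recombination. The argument order of `chineseremaindertheorem` imported
# from extended_euclidean may differ; this sketch just documents the underlying math.
def _crt_decrypt_reference(c, p, q, dp, dq, qinv):
    """Return c**d mod (p*q) computed with the CRT shortcut."""
    m1 = pow(c, dp, p)          # c^(d mod (p-1)) mod p
    m2 = pow(c, dq, q)          # c^(d mod (q-1)) mod q
    h = (qinv * (m1 - m2)) % p  # Garner coefficient
    return m2 + h * q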
def getting_private_key():
"""making a private key"""
text = openning_text("encrypted_text.txt")
with open("private_key.txt", "r") as text_file:
private_key = text_file.read()
return private_key.split(), text
private_key, encrypted_text = getting_private_key()
text_code = decrypting_method(encrypted_text, private_key)
text = getting_massage(text_code)
with open("decrypted_text.txt", "w") as text_file:
text_file.write(text)
|
py | b40e39d8572a3dea6c888821c47a9ff1cbd75569 | # Copyright (c) 2021 Binbin Zhang([email protected]), Lucky Wong
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import copy
import logging
import os
import sys
import torch
import yaml
from torch.utils.data import DataLoader
from wenet.dataset.dataset import Dataset
from wenet.transformer.asr_model import init_asr_model
from wenet.utils.checkpoint import load_checkpoint
from wenet.utils.file_utils import read_symbol_table, read_non_lang_symbols
from wenet.utils.config import override_config
def get_args():
parser = argparse.ArgumentParser(description='Static quantize your model')
parser.add_argument('--config', required=True, help='config file')
parser.add_argument('--test_data', required=True, help='test data file')
parser.add_argument('--checkpoint', required=True, help='checkpoint model')
parser.add_argument('--num_workers',
default=0,
type=int,
help='num of subprocess workers for reading')
parser.add_argument('--pin_memory',
action='store_true',
default=False,
help='Use pinned memory buffers used for reading')
parser.add_argument('--prefetch',
default=100,
type=int,
help='prefetch number')
parser.add_argument('--script_model',
required=True,
help='output script model')
parser.add_argument('--data_type',
default='raw',
choices=['raw', 'shard'],
help='train and cv data type')
parser.add_argument('--dict', required=True, help='dict file')
parser.add_argument("--non_lang_syms",
help="non-linguistic symbol file. One symbol per line.")
parser.add_argument('--bpe_model',
default=None,
type=str,
help='bpe model for english part')
parser.add_argument('--override_config',
action='append',
default=[],
help="override yaml config")
args = parser.parse_args()
print(args)
return args
def main():
args = get_args()
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(message)s')
os.environ['CUDA_VISIBLE_DEVICES'] = str("-1")
with open(args.config, 'r') as fin:
configs = yaml.load(fin, Loader=yaml.FullLoader)
if len(args.override_config) > 0:
configs = override_config(configs, args.override_config)
symbol_table = read_symbol_table(args.dict)
test_conf = copy.deepcopy(configs['dataset_conf'])
test_conf['filter_conf']['max_length'] = 102400
test_conf['filter_conf']['min_length'] = 0
test_conf['filter_conf']['token_max_length'] = 102400
test_conf['filter_conf']['token_min_length'] = 0
test_conf['filter_conf']['max_output_input_ratio'] = 102400
test_conf['filter_conf']['min_output_input_ratio'] = 0
test_conf['speed_perturb'] = False
test_conf['spec_aug'] = False
test_conf['shuffle'] = False
test_conf['sort'] = False
test_conf['fbank_conf']['dither'] = 0.0
test_conf['batch_conf']['batch_type'] = "static"
test_conf['batch_conf']['batch_size'] = 1
non_lang_syms = read_non_lang_symbols(args.non_lang_syms)
test_dataset = Dataset(args.data_type,
args.test_data,
symbol_table,
test_conf,
args.bpe_model,
non_lang_syms,
partition=False)
test_data_loader = DataLoader(test_dataset,
batch_size=None,
pin_memory=args.pin_memory,
num_workers=args.num_workers,
prefetch_factor=args.prefetch)
# Init asr model from configs
model_fp32 = init_asr_model(configs)
load_checkpoint(model_fp32, args.checkpoint)
# model must be set to eval mode for static quantization logic to work
model_fp32.eval()
# Fuse the activations to preceding layers, where applicable.
# This needs to be done manually depending on the model architecture.
# Common fusions include `conv + relu` and `conv + batchnorm + relu`
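    # Illustrative sketch only (not executed here): the usual eager-mode static
    # quantization flow in PyTorch looks roughly like the commented lines below;
    # the module names passed to fuse_modules are hypothetical and model-dependent.
    #   model_fp32.qconfig = torch.quantization.get_default_qconfig('fbgemm')
    #   model_fp32 = torch.quantization.fuse_modules(model_fp32, [['conv', 'bn', 'relu']])
    #   model_fp32 = torch.quantization.prepare(model_fp32)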
print('================ Float 32 ======================')
print(model_fp32)
# calibrate the prepared model to determine quantization parameters for
# activations in a real world setting, the calibration would be done with
# a representative dataset
with torch.no_grad():
for batch_idx, batch in enumerate(test_data_loader):
keys, feats, target, feats_lengths, target_lengths = batch
model_fp32(feats, feats_lengths, target, target_lengths)
if batch_idx % 100 == 0:
print('Progress utts {}'.format(batch_idx))
sys.stdout.flush()
# Convert the observed model to a quantized model. This does several things:
# quantizes the weights, computes and stores the scale and bias value to be
# used with each activation tensor, and replaces key operators with
# quantized implementations.
print('=================== int8 ======================')
model_int8 = torch.quantization.convert(model_fp32)
print(model_int8)
print('================ int8(script) ==================')
script_model = torch.jit.script(model_int8)
script_model.save(args.script_model)
print(script_model)
if __name__ == '__main__':
main()
|
py | b40e3a54eae7c1ee70a73884a147632cb2f78c3e | import argparse
import h5py
import multiprocessing as mp
import numpy as np
import os
import sys
import tensorflow as tf
import time
import julia
backend = 'TkAgg'
import matplotlib
matplotlib.use(backend)
import matplotlib.pyplot as plt
matplotlib.use('TkAgg')
from contexttimer import Timer
import hgail.misc.utils
import algorithms.utils
from envs import hyperparams, utils, build_env
from envs.utils import str2bool
from utils.math_utils import classify_traj
from algorithms.AGen import rls, validate_utils
from preprocessing.clean_holo import clean_data, csv2txt, create_lane
from preprocessing.extract_feature import extract_ngsim_features
from src.trajdata import convert_raw_ngsim_to_trajdatas
# import pdb
import math
import tqdm
import torch
plt.style.use("ggplot")
N_ITERATION = 1 # the number of iterations of rls step
N_VEH = 1 # the number of controlled vehicles for each simulation
def online_adaption(
env,
policy,
obs,
mean,
env_kwargs=dict(),
lbd=0.99,
adapt_steps=1,
trajinfos=None):
if len(obs.shape) == 2:
obs = np.expand_dims(obs, axis=0)
mean = np.expand_dims(mean, axis=0)
assert trajinfos is not None
policy_fc_weight = np.array(policy.mean_network.fc.weight.data.cpu())
policy_fc_bias = np.array(policy.mean_network.fc.bias.data.cpu()).reshape((2, 1))
new_theta = np.concatenate([policy_fc_weight, policy_fc_bias], axis=1)
new_theta = np.mean(new_theta)
# print("new theta: {}".format(new_theta))
ego_start_frame = trajinfos[env_kwargs['egoid']]['ts']
maxstep = trajinfos[env_kwargs['egoid']]['te'] - trajinfos[env_kwargs['egoid']]['ts'] - 52
env_kwargs['start'] = ego_start_frame + 2
x = env.reset(**env_kwargs)
n_agents = x.shape[0]
# print("Agent number: {}".format(n_agents))
dones = [True] * n_agents
predicted_trajs, adapnets = [], []
policy.reset(dones)
print("max steps")
print(maxstep)
mean = np.expand_dims(mean, axis=2)
prev_hiddens = np.zeros([n_agents, 64])
param_length = 65 if adapt_steps == 1 else 195
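    # One recursive-least-squares (RLS) filter per agent maps the adaptation
    # vector (the 64-dim hidden state, plus the previous hidden state and the
    # observation when adapt_steps == 2) to the 2-dim action mean, with
    # forgetting factor `lbd`; the extra entry is presumably a bias term.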
for i in range(n_agents):
adapnets.append(rls.rls(lbd, new_theta, param_length, 2))
lx = x
    error = []  # one entry per adaptation step; each entry is a flat list of Euclidean position errors (one per agent per predicted frame)
curve_error = []
changeLane_error = []
straight_error = []
orig_traj_list = []
pred_traj_list = []
time_list = []
for step in tqdm.tqdm(range(ego_start_frame, maxstep + ego_start_frame - 1)):
a, a_info, hidden_vec = policy.get_actions_with_prev(obs[:, step, :], mean[:, step, :], prev_hiddens)
# print(hidden_vec)
hidden_vec = np.random.randn(1, 64)
if adapt_steps == 1:
adap_vec = hidden_vec
elif adapt_steps == 2:
adap_vec = np.concatenate((hidden_vec, prev_hiddens, obs[:, step, :]), axis=1)
else:
print('Adapt steps can only be 1 and 2 for now.')
exit(0)
adap_vec = np.expand_dims(adap_vec, axis=1)
for i in range(n_agents):
for _ in range(N_ITERATION):
adapnets[i].update(adap_vec[i], mean[i, step+1, :])
adapnets[i].draw.append(adapnets[i].theta[6, 1])
prev_actions, prev_hiddens = a, hidden_vec
error_per_step, time_info, orig_traj, pred_traj = prediction(x, adapnets, env, policy,
prev_hiddens, n_agents, adapt_steps)
traj_cat = classify_traj(orig_traj)
if traj_cat != "invalid":
error.append(error_per_step)
orig_traj_list.append(orig_traj)
pred_traj_list.append(pred_traj)
if traj_cat == "curve":
curve_error.append(error_per_step)
elif traj_cat == "changeLane":
changeLane_error.append(error_per_step)
elif traj_cat == "straight":
straight_error.append(error_per_step)
if "20" in time_info.keys() and "50" in time_info.keys():
time_list.append([time_info["20"], time_info["50"]])
d = np.stack([adapnets[i].draw for i in range(n_agents)])
env_kwargs['start'] += 1
lx = x
x = env.reset(**env_kwargs)
error_info = dict()
error_info["overall"] = error
error_info["curve"] = curve_error
error_info["lane_change"] = changeLane_error
error_info["straight"] = straight_error
error_info["time_info"] = time_list
error_info["orig_traj"] = orig_traj_list
error_info["pred_traj"] = pred_traj_list
print("\n\nVehicle id: {} Statistical Info:\n\n".format(env_kwargs['egoid']))
utils.print_error(error_info)
return error_info
def prediction(x, adapnets, env, policy, prev_hiddens, n_agents, adapt_steps):
predict_span = 50
    error_per_step = []  # flat list of Euclidean position errors, one per agent per predicted frame
valid_data = True
hi_speed_limit = 40
lo_speed_limit = 10
orig_trajectory = []
pred_trajectory = []
start_time = time.time()
time_info = {}
for j in range(predict_span):
# if j == 0:
# print("feature {}".format(j), x)
x[0][15] = 0
a, a_info, hidden_vec = policy.get_actions(x)
# feature_array = np.concatenate([feature_array, np.array(x)], axis=0)
hidden_vec = np.random.randn(1, 64)
if adapt_steps == 1:
adap_vec = hidden_vec
else:
adap_vec = np.concatenate((hidden_vec, prev_hiddens, x), axis=1)
means = np.zeros([n_agents, 2])
log_std = np.zeros([n_agents, 2])
for i in range(x.shape[0]):
means[i] = adapnets[i].predict(np.expand_dims(adap_vec[i], 0))
log_std[i] = np.log(np.std(adapnets[i].theta, axis=0))
prev_hiddens = hidden_vec
# rnd = np.random.normal(size=means.shape)
actions = means
# print("random feature:", actions)
# print("policy feature:", a)
# print("predict step: {}".format(j+1))
nx, r, dones, e_info = env.step(actions)
        error_per_agent = []  # length is n_agents; each element is the Euclidean distance between predicted and ground-truth position
for i in range(n_agents):
assert n_agents == 1
# print("orig x: ", e_info["orig_x"][i])
# print("orig y: ", e_info["orig_y"][i])
# print("orig v: ", e_info["orig_v"][i])
# print("predicted v:", e_info["v"][i])
# print("orig theta: ", e_info["orig_theta"][i])
# print("predicted x: ", e_info["x"][i])
# print("predicted y: ", e_info["y"][i])
dx = abs(e_info["orig_x"][i] - e_info["x"][i])
dy = abs(e_info["orig_y"][i] - e_info["y"][i])
dist = math.hypot(dx, dy)
# print("dist: ", dist)
if e_info["orig_v"][i] > hi_speed_limit or e_info["orig_v"][i] < lo_speed_limit:
valid_data = False
# print("{}-----> dx: {} dy: {} dist: {}".format(j, dx, dy, dist))
if valid_data:
if dist > 140:
exit(0) # this is for debugging
error_per_agent.append(dist)
orig_trajectory.append([e_info["orig_x"][i], e_info["orig_y"][i]])
pred_trajectory.append([e_info["x"][i], e_info["y"][i]])
if valid_data:
error_per_step += error_per_agent
if any(dones):
break
x = nx
end_time = time.time()
if j == 19:
time_info["20"] = end_time - start_time
elif j == 49:
time_info["50"] = end_time - start_time
return error_per_step, time_info, orig_trajectory, pred_trajectory
def collect_trajectories(
args,
params,
egoids,
error_dict,
pid,
env_fn,
policy_fn,
use_hgail,
random_seed,
lbd,
adapt_steps):
print('env initialization args')
print(args)
env, trajinfos, _, _ = env_fn(args, n_veh=N_VEH, alpha=0.)
# print(trajinfos[0])
args.policy_recurrent = True
policy = policy_fn(args, env, mode=1)
if torch.cuda.is_available():
policy = policy.cuda()
with tf.Session() as sess:
# initialize variables
sess.run(tf.global_variables_initializer())
# then load parameters
if use_hgail:
for i, level in enumerate(policy):
level.algo.policy.set_param_values(params[i]['policy'])
policy = policy[0].algo.policy
else:
policy_param_path = "./data/experiments/NGSIM-gail/imitate/model/policy.pkl"
policy.load_param(policy_param_path)
print("load policy param from: {}".format(policy_param_path))
# policy.set_param_values(params['policy'])
normalized_env = hgail.misc.utils.extract_normalizing_env(env)
if normalized_env is not None:
normalized_env._obs_mean = params['normalzing']['obs_mean']
normalized_env._obs_var = params['normalzing']['obs_var']
# collect trajectories
egoids = np.unique(egoids)
nids = len(egoids)
veh_2_index = {}
if args.env_multiagent:
data, index = validate_utils.get_multiagent_ground_truth(args.ngsim_filename, args.h5_filename)
for i, idx in enumerate(index):
veh_2_index[idx] = i
else:
data = validate_utils.get_ground_truth(args.ngsim_filename, args.h5_filename)
sample = np.random.choice(data['observations'].shape[0], 2)
kwargs = dict()
# print(('Loading obs data Running time: %s Seconds' % (end_time - start_time)))
if args.env_multiagent:
# I add not because single simulation has no orig_x etc.
# egoid = random.choice(egoids)
trajinfos = trajinfos[0]
error = {"overall": [],
"curve": [],
"lane_change": [],
"straight": [],
"time_info": [],
"orig_traj": [],
"pred_traj": []}
for veh_id in trajinfos.keys():
if trajinfos[veh_id]["te"] - trajinfos[veh_id]["ts"] <= 52:
continue
if random_seed:
kwargs = dict(random_seed=random_seed + veh_id)
print("egoid: {}, ts: {}, te: {}".format(veh_id, trajinfos[veh_id]["ts"], trajinfos[veh_id]["te"]))
print("data index is {}".format(veh_2_index[veh_id]))
kwargs['egoid'] = veh_id
kwargs['traj_idx'] = 0
error_info = online_adaption(
env,
policy,
obs=data['observations'][[veh_2_index[veh_id]], :, :],
mean=data['actions'][[veh_2_index[veh_id]], :, :],
env_kwargs=kwargs,
lbd=lbd,
adapt_steps=adapt_steps,
trajinfos=trajinfos
)
error["overall"] += error_info["overall"]
error["curve"] += error_info["curve"]
error["lane_change"] += error_info["lane_change"]
error["straight"] += error_info["straight"]
error["time_info"] += error_info["time_info"]
error["orig_traj"] += error_info["orig_traj"]
error["pred_traj"] += error_info["pred_traj"]
error_dict.append(error)
else:
# for i in sample:
for i, egoid in enumerate(egoids):
sys.stdout.write('\rpid: {} traj: {} / {}\n'.format(pid, i, nids))
index = veh_2_index[egoid]
traj = online_adaption(
env,
policy,
obs=data['observations'][index, :, :],
mean=data['actions'][index, :, :],
env_kwargs=dict(egoid=egoid, traj_idx=[0]),
lbd=lbd,
adapt_steps=adapt_steps,
)
return error_dict
def parallel_collect_trajectories(
args,
params,
egoids,
n_proc,
env_fn=build_env.build_ngsim_env,
use_hgail=False,
random_seed=None,
lbd=0.99,
adapt_steps=1):
# build manager and dictionary mapping ego ids to list of trajectories
tf_policy = False
parallel = False
# set policy function
policy_fn = validate_utils.build_policy if tf_policy else algorithms.utils.build_policy
# partition egoids
proc_egoids = utils.partition_list(egoids, n_proc)
if parallel:
manager = mp.Manager()
error_dict = manager.list()
# pool of processes, each with a set of ego ids
pool = mp.Pool(processes=n_proc)
# print(('Creating parallel env Running time: %s Seconds' % (end_time - start_time)))
# run collection
results = []
for pid in range(n_proc):
res = pool.apply_async(
collect_trajectories,
args=(
args,
params,
proc_egoids[pid],
error_dict,
pid,
env_fn,
policy_fn,
use_hgail,
random_seed,
lbd,
adapt_steps
)
)
results.append(res)
[res.get() for res in results]
pool.close()
else:
error_dict = []
error_dict = collect_trajectories(
args,
params,
proc_egoids[0],
error_dict,
0,
env_fn,
policy_fn,
use_hgail,
random_seed,
lbd,
adapt_steps
)
# wait for the processes to finish
# let the julia processes finish up
time.sleep(10)
return error_dict[0]
def single_process_collect_trajectories(
args,
params,
egoids,
starts,
n_proc,
env_fn=build_env.build_ngsim_env,
max_steps=200,
use_hgail=False,
random_seed=None):
'''
    This function is for debugging purposes.
'''
# build list to be appended to
trajlist = []
# set policy function
policy_fn = build_env.build_hierarchy if use_hgail else validate_utils.build_policy
tf.reset_default_graph()
# collect trajectories in a single process
collect_trajectories(
args,
params,
egoids,
starts,
trajlist,
n_proc,
env_fn,
policy_fn,
max_steps,
use_hgail,
random_seed
)
return trajlist
def collect(
egoids,
args,
exp_dir,
use_hgail,
params_filename,
n_proc,
collect_fn=parallel_collect_trajectories,
random_seed=None,
lbd = 0.99,
adapt_steps=1):
'''
Description:
- prepare for running collection in parallel
- multiagent note: egoids and starts are not currently used when running
this with args.env_multiagent == True
'''
# load information relevant to the experiment
params_filepath = os.path.join(exp_dir, 'imitate/{}'.format(params_filename))
params = np.load(params_filepath)['params'].item()
# validation setup
validation_dir = os.path.join(exp_dir, 'imitate', 'test')
utils.maybe_mkdir(validation_dir)
with Timer():
error = collect_fn(
args,
params,
egoids,
n_proc,
use_hgail=use_hgail,
random_seed=random_seed,
lbd=lbd,
adapt_steps=adapt_steps
)
return error
# utils.write_trajectories(output_filepath, trajs)
def load_egoids(filename, args, n_runs_per_ego_id=10, env_fn=build_env.build_ngsim_env):
offset = args.env_H + args.env_primesteps
basedir = os.path.expanduser('~/Autoenv/data/') # TODO: change the file path
ids_filename = filename.replace('.txt', '-index-{}-ids.h5'.format(offset))
print("ids_filename")
print(ids_filename)
ids_filepath = os.path.join(basedir, ids_filename)
print("Creating ids file")
# this should create the ids file
env_fn(args)
if not os.path.exists(ids_filepath):
raise ValueError('file unable to be created, check args')
ids = np.array(h5py.File(ids_filepath, 'r')['ids'].value)
print("Creating starts file")
ids_file = h5py.File(ids_filepath, 'r')
ts = ids_file['ts'].value
te = ids_file['te'].value
length = np.array([e - s for (s, e) in zip(ts, te)])
traj_num = length.sum()
ids = np.tile(ids, n_runs_per_ego_id)
return ids, traj_num
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='validation settings')
parser.add_argument('--n_proc', type=int, default=1)
parser.add_argument('--exp_dir', type=str, default='./data/experiments/NGSIM-gail')
parser.add_argument('--params_filename', type=str, default='itr_200.npz')
parser.add_argument('--n_runs_per_ego_id', type=int, default=1)
parser.add_argument('--use_hgail', type=str2bool, default=False)
parser.add_argument('--use_multiagent', type=str2bool, default=False)
parser.add_argument('--n_multiagent_trajs', type=int, default=10000)
parser.add_argument('--debug', type=str2bool, default=False)
parser.add_argument('--random_seed', type=int, default=None)
parser.add_argument('--n_envs', type=int, default=None)
parser.add_argument('--remove_ngsim_vehicles', type=str2bool, default=False)
parser.add_argument('--lbd', type=float, default=0.99)
parser.add_argument('--adapt_steps', type=int, default=1)
run_args = parser.parse_args()
j = julia.Julia()
j.using("NGSIM")
args_filepath = "./args/params.npz"
if os.path.isfile(args_filepath):
args = hyperparams.load_args(args_filepath)
else:
raise ValueError("No such params file") # if no such file, please run save_args.py
if run_args.use_multiagent:
args.env_multiagent = True
args.remove_ngsim_vehicles = run_args.remove_ngsim_vehicles
if run_args.debug:
collect_fn = single_process_collect_trajectories
else:
collect_fn = parallel_collect_trajectories
prev_lane_name = None # used to generate roadway information
data_base_dir = "./preprocessing/data" # the directory we used to processing raw data
total_error = {
"overall": [],
"curve": [],
"lane_change": [],
"straight": [],
"time_info": [],
"orig_traj": [],
"pred_traj": []
}
for dir_name in os.listdir(data_base_dir):
if "downsampled" not in dir_name and os.path.isdir(os.path.join(data_base_dir, dir_name, "processed")):
dir_error = {
"overall": [],
"curve": [],
"lane_change": [],
"straight": [],
"time_info": [],
"orig_traj": [],
"pred_traj": []
}
for file_name in os.listdir(os.path.join(data_base_dir, dir_name, "processed")):
if "section" in file_name:
orig_traj_file = os.path.join(dir_name, "processed", file_name)
print("processing file {}".format(orig_traj_file))
else:
print("lane file, skipping")
continue
lane_file = os.path.join(dir_name, "processed", '{}_lane'.format(orig_traj_file[:19]))
processed_data_path = 'holo_{}_perfect_cleaned.csv'.format(orig_traj_file[5:19])
df_len = clean_data(orig_traj_file) # clean Holo data raw csv
if df_len == 0:
print("Invalid file, skipping")
continue
csv2txt(processed_data_path)
if prev_lane_name != lane_file:
create_lane(lane_file)
else:
print("Using same lane file, skipping generating a new one")
print("Finish cleaning the original data")
print("Start generating roadway")
if prev_lane_name != lane_file:
base_dir = os.path.expanduser('~/Autoenv/data/')
j.write_roadways_to_dxf(base_dir)
j.write_roadways_from_dxf(base_dir)
prev_lane_name = lane_file
print("Finish generating roadway")
convert_raw_ngsim_to_trajdatas()
print("Start feature extraction")
extract_ngsim_features(output_filename="ngsim_holo_new.h5", n_expert_files=1)
print("Finish converting and feature extraction")
fn = "trajdata_holo_trajectories.txt"
hn = './data/trajectories/ngsim_holo_new.h5'
if run_args.n_envs:
args.n_envs = run_args.n_envs
# args.env_H should be 200
sys.stdout.write('{} vehicles with H = {}\n'.format(args.n_envs, args.env_H))
args.ngsim_filename = fn
args.h5_filename = hn
if args.env_multiagent:
egoids, _ = load_egoids(fn, args, run_args.n_runs_per_ego_id)
else:
egoids, _ = load_egoids(fn, args, run_args.n_runs_per_ego_id)
print("egoids")
print(egoids)
# print("starts")
# print(starts)
if len(egoids) == 0:
print("No valid vehicles, skipping")
continue
error = collect(
egoids,
args,
exp_dir=run_args.exp_dir,
params_filename=run_args.params_filename,
use_hgail=run_args.use_hgail,
n_proc=run_args.n_proc,
collect_fn=collect_fn,
random_seed=run_args.random_seed,
lbd=run_args.lbd,
adapt_steps=run_args.adapt_steps
)
print("\n\nDirectory: {}, file: {} Statistical Info:\n\n".format(dir_name, file_name))
utils.print_error(error)
dir_error["overall"] += error["overall"]
dir_error["curve"] += error["curve"]
dir_error["lane_change"] += error["lane_change"]
dir_error["straight"] += error["straight"]
dir_error["time_info"] += error["time_info"]
dir_error["orig_traj"] += error["orig_traj"]
dir_error["pred_traj"] += error["pred_traj"]
print("\n\nDirectory: {} Statistical Info:\n\n".format(dir_name))
utils.print_error(dir_error)
total_error["overall"] += dir_error["overall"]
total_error["curve"] += dir_error["curve"]
total_error["lane_change"] += dir_error["lane_change"]
total_error["straight"] += dir_error["straight"]
total_error["time_info"] += dir_error["time_info"]
total_error["orig_traj"] += dir_error["orig_traj"]
total_error["pred_traj"] += dir_error["pred_traj"]
print("\n\nOverall Statistical Info up to now:\n\n")
utils.print_error(total_error)
|
py | b40e3b9106b0ee4d4e20fda72c5bb310ecaf0c0c |
# sys imports
import os,sys
sys.path.append('/home/ryan/github/prosodic')
import warnings
warnings.filterwarnings('ignore')
import prosodic as p
from tqdm import tqdm
import pandas as pd,numpy as np,random,json,pickle,shutil
from collections import defaultdict,Counter
import subprocess,multiprocessing as mp
mp.set_start_method('fork')
from pprint import pprint
from itertools import product
pd.options.display.max_columns=False
import re,nltk
from sqlitedict import SqliteDict
import logging
logging.Logger.manager.loggerDict['sqlitedict'].disabled=True
from parmapper import parmap
from functools import partial
# constants
ENGINE_PROSODIC='prosodic'
ENGINE_CADENCE='cadence'
ENGINE=ENGINE_PROSODIC
MIN_WORDS_IN_PHRASE=2
DEFAULT_LANG='en'
PATH_HERE=os.path.abspath(os.path.dirname(__file__))
PATH_CODE=PATH_HERE
PATH_REPO=os.path.abspath(os.path.join(PATH_CODE,'..'))
PATH_HOME=os.path.join(os.path.expanduser('~'),'.cadence')
PATH_DATA=os.path.join(PATH_HOME,'data')
# PATH_DATA=os.path.join(PATH_REPO,'data')
DATA_URL='https://www.dropbox.com/s/fywmqrlpemjf43c/data_cadence.zip?dl=1'
PATH_NOTEBOOKS=os.path.join(PATH_REPO,'notebooks')
PATH_IPA_FEATS=os.path.join(PATH_DATA,'data.feats.ipa.csv')
INCL_ALT=True
DEFAULT_NUM_PROC=1
#mp.cpu_count()//2# - 1
KEEP_BEST=1
SBY=csby=['combo_i','word_i','syll_i']
PARSERANKCOL='parse_rank'
LINEKEY=[
'stanza_i',
'line_i',
'line_str',
'linepart_i',#'linepart_str',
'linepart_str',
'linepart_end_str',
PARSERANKCOL,
#'parse_str',
'is_troch',
'parse_i',
'parse',
# 'combo_stress',
'parse_str',
'combo_stress',
'combo_ipa',
'combo_i',
# 'combo_i','combo_stress','combo_ipa',
'parse_is_bounded','parse_bounded_by',
'parse_pos_i','parse_pos',
'word_i','word_str','word_ipa_i','word_ipa',
'syll_i','combo_syll_i','syll_str','syll_ipa','syll_stress','syll_weight',
'parse_syll_i','parse_syll',
]
PARSELINEKEY = LINEKEY[:LINEKEY.index('parse_pos_i')]
PARSESYLLKEY=LINEKEY
TOTALCOL='*total'
DEFAULT_CONSTRAINTS = {
'*w/peak',
'*w/stressed',
# '*s/unstressed',
'*f-res',
'*w-res'
}
NUM_BOUNDED_TO_STORE = 10
constraint_names_in_prosodic = {
'*f-res':'footmin-f-resolution',
'*s/unstressed':'stress.s=>-u',
'*w-res':'footmin-w-resolution',
'*w/peak':'strength.w=>-p',
'*w/stressed':'stress.w=>-p',
'*s/trough':'strength.s=>-u',
}
constraint_names_from_prosodic = dict((v,k) for k,v in constraint_names_in_prosodic.items())
txt="""In Xanadu did Kubla Khan
A stately pleasure-dome decree:
Where Alph, the sacred river, ran
Through caverns measureless to man
Down to a sunless sea.
"""
line=txt.split('\n')[0]
APPLY_POSTHOC_CONSTRAINTS=False
# local imports
from .tools import *
from .langs import *
from .constraints import *
from .parsers import *
from .cadence import *
# check
check_basic_config() |
py | b40e3bd8c09d0960f9000716c8bbaaa80e1b0953 | # -*- coding: utf-8 -*-
"""
External-IO connections based on asyncio
The only 3 requirements for these connections is:
(1) store Moler's connection inside self.moler_connection attribute
(2) plugin into Moler's connection the way IO outputs data to external world:
self.moler_connection.how2send = self.send
(3) forward IO received data into self.moler_connection.data_received(data)
"""
__author__ = 'Grzegorz Latuszek'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = '[email protected]'
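# Illustrative sketch only (not part of this package): the skeleton of an
# external-IO class satisfying the 3 requirements above. Apart from
# `moler_connection`, `how2send` and `data_received`, all names are hypothetical.
#
#   class ExampleIO(object):
#       def __init__(self, moler_connection):
#           self.moler_connection = moler_connection      # requirement (1)
#           self.moler_connection.how2send = self.send    # requirement (2)
#
#       def send(self, data):
#           ...  # write 'data' to the external world
#
#       def on_data_from_external_world(self, data):
#           self.moler_connection.data_received(data)     # requirement (3)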
|
py | b40e3c147900fb0d0abe72989ea628ee9d6af5fa | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# pyMLP Molecular Lipophilic Potential evaluator
# Copyright (c) 2006-2007 Julien Lefeuvre <[email protected]>
#
"""pyMLP: Molecular Lipophilicity Potential evaluator"""
# PLEASE DO NOT CHANGE FORMAT OF __version__ LINE (setup.py reads this)
__author__ = "Julien Lefeuvre <[email protected]>"
__version__ = "1.0"
__date__ = "2007-03-28"
__copyright__ = "Copyright (c) 2006-2007 %s. All rights reserved." % __author__
__licence__ = "BSD"
import sys
import os
import shutil
import time
import numpy
from optparse import OptionParser
from pprint import pformat
try:
import psyco
psyco.full()
except ImportError:
pass
class Defaults(object):
"""Constants"""
def __init__(self):
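        # gridmargin: padding (in angstroms) added on every side of the
        # molecule's bounding box when the grid is built (see _griddimcalc).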
self.gridmargin = 10.0
self.fidatadefault = { #Default fi table
'ALA': {'CB': '0.63', #fi : lipophilic atomic potential
'C': '-0.54',
'CA': '0.02',
'O': '-0.68',
'N': '-0.44'},
'ARG': {'C': '-0.54',
'CA': '0.02',
'CB': '0.45',
'CD': '0.45',
'CG': '0.45',
'CZ': '-0.54',
'N': '-0.44',
'NE': '-0.55',
'NH1': '-0.11',
'NH2': '-0.83',
'O': '-0.68'},
'ASN': {'C': '-0.54',
'CA': '0.02',
'CB': '0.02',
'CG': '0.45',
'N': '-0.44',
'ND2': '-0.11',
'O': '-0.68',
'OD1': '-0.68'},
'ASP': {'C': '-0.54',
'CA': '0.02',
'CB': '0.45',
'CG': '0.54',
'N': '-0.44',
'O': '-0.68',
'OD1': '-0.68',
'OD2': '0.53'},
'CYS': {'C': '-0.54',
'CA': '0.02',
'CB': '0.45',
'N': '-0.44',
'O': '-0.68',
'SG': '0.27'},
'GLN': {'C': '-0.54',
'CA': '0.02',
'CB': '0.45',
'CD': '-0.54',
'CG': '0.45',
'N': '-0.44',
'NE2': '-0.11',
'O': '-0.68',
'OE1': '-0.68'},
'GLU': {'C': '-0.54',
'CA': '0.02',
'CB': '0.45',
'CD': '-0.54',
'CG': '0.45',
'N': '-0.44',
'O': '-0.68',
'OE1': '-0.68',
'OE2': '0.53'},
'GLY': {'C': '-0.54',
'CA': '0.45',
'O': '-0.68',
'N': '-0.55'},
'HIS': {'C': '-0.54',
'CA': '0.02',
'CB': '0.45',
'CD2': '0.31',
'CE1': '0.31',
'CG': '0.09',
'N': '-0.44',
'ND1': '-0.56',
'NE2': '-0.80',
'O': '-0.68'},
'HYP': {'C': '-0.54',
'CA': '0.02',
'CB': '0.45',
'CD1': '0.45',
'CG': '0.02',
'N': '-0.92',
'O': '-0.68',
'OD2': '-0.93'},
'ILE': {'C': '-0.54',
'CA': '0.02',
'CB': '0.02',
'CD': '0.63',
'CD1': '0.63',
'CG1': '0.45',
'CG2': '0.63',
'N': '-0.44',
'O': '-0.68'},
'LEU': {'C': '-0.54',
'CA': '0.02',
'CB': '0.45',
'CD1': '0.63',
'CD2': '0.63',
'CG': '0.02',
'N': '-0.44',
'O': '-0.68'},
'LYS': {'C': '-0.54',
'CA': '0.02',
'CB': '0.45',
'CD': '0.45',
'CE': '0.45',
'CG': '0.45',
'N': '-0.44',
'NZ': '-1.08',
'O': '-0.68'},
'MET': {'C': '-0.54',
'CA': '0.02',
'CB': '0.45',
'CE': '0.63',
'CG': '0.45',
'N': '-0.44',
'O': '-0.68',
'SD': '-0.30'},
'PCA': {'C': '-0.54',
'CA': '0.02',
'CB': '0.45',
'CD': '-0.54',
'CG': '0.45',
'N': '1.52',
'O': '-0.68',
'OE': '-0.68'},
'PHE': {'C': '-0.54',
'CA': '0.02',
'CB': '0.45',
'CD1': '0.31',
'CD2': '0.31',
'CE1': '0.31',
'CE2': '0.31',
'CG': '0.09',
'CZ': '0.31',
'N': '-0.44',
'O': '-0.68'},
'PRO': {'C': '-0.54',
'CA': '0.02',
'CB': '0.45',
'CD': '0.45',
'CG': '0.45',
'N': '-0.92',
'O': '-0.68'},
'SER': {'C': '-0.54',
'CA': '0.02',
'CB': '0.45',
'N': '-0.44',
'O': '-0.68',
'OG': '-0.99'},
'THR': {'C': '-0.54',
'CA': '0.02',
'CB': '0.02',
'CG2': '0.63',
'N': '-0.44',
'O': '-0.68',
'OG1': '-0.93'},
'TRP': {'C': '-0.54',
'CA': '0.02',
'CB': '0.45',
'CD1': '0.31',
'CD2': '0.24',
'CE2': '0.24',
'CE3': '0.31',
'CG': '0.09',
'CH2': '0.31',
'CZ2': '0.31',
'CZ3': '0.31',
'N': '-0.44',
'NE1': '-0.55',
'O': '-0.68'},
'TYR': {'C': '-0.54',
'CA': '0.02',
'CB': '0.45',
'CD1': '0.31',
'CD2': '0.31',
'CE1': '0.31',
'CE2': '0.31',
'CG': '0.09',
'CZ': '0.09',
'N': '-0.44',
'O': '-0.68',
'OH': '-0.17'},
'VAL': {'C': '-0.54',
'CA': '0.02',
'CB': '0.02',
'CG1': '0.63',
'CG2': '0.63',
'N': '-0.44',
'O': '-0.68'}}
def _CLIparsing():
"""Parsing of pyMLP command line"""
usage='\n%%prog -i file.pdb\n\n%s' %__doc__
version="%%prog %s %s" %(__version__, __date__)
CLparser = OptionParser(usage=usage, version=version)
CLparser.add_option('-i', '--inpdb', dest='pdbfile',
help='PDB file (input)', metavar='file.pdb')
CLparser.add_option('-f', '--fipdb', dest='fipdbfile',
help='PDB file with added fi (partial lipophilicity) (input/output)'
' (optional)', metavar='file_fi.pdb')
CLparser.add_option('-t', '--fitab', dest='fitabfile',
help='FI table used to convert file.pdb into file_fi.pdb '
'(input/output) (optional)', metavar='fi.tab')
CLparser.add_option('-o', '--outdx', dest='dxfile',
help='DX file (output) (optional)', metavar='file.dx')
CLparser.add_option('-m', '--method', dest='method',
help='Potential calculation method : '
'dubost [1/(1+d)] '
'fauchere [exp(-d)] (default) '
'brasseur [exp(-d/3.1)] '
'buckingham [1/d**n] '
'type5 [exp(-sqrt(d))] '
'none [no calculation]', metavar='fauchere')
CLparser.add_option('-s', '--spacing', type="float", dest='spacing',
help='Grid spacing (default = 1.0 Angstrom)', metavar='1.0')
CLparser.add_option('-n', '--n_exp', type="float", dest='nexp',
help='Exponent for the buckingham method (default = 3.0)',
metavar='3.0')
CLparser.add_option('-v', '--verbose', action='store_true', dest='verbose',
help='make a lot of noise ...')
CLparser.set_defaults(verbose=False, spacing=1.0, method='fauchere',
nexp=3.0)
(params, args) = CLparser.parse_args()
#Checking if the input is valid
if args or (not params.pdbfile and not params.fipdbfile):
CLparser.error('This script require parameters ...\n'
'see help : pyMLP.py -h')
methods=['dubost', 'fauchere', 'brasseur', 'buckingham', 'type5', 'none']
if params.method not in methods:
CLparser.error('Please use a valid method ...\n'
'see help : pyMLP.py -h')
return params
def writefitab(fidata,fitabfile,verbose=False):
"""write fidata in the file fitabfile (to let the user modify it)"""
try:
fitabf = open(fitabfile,'w')
fitabf.write('fidata = {\n '+pformat(fidata)[1:])
if verbose: sys.stdout.write("%s created with default values...\n"
% fitabfile)
except IOError:
sys.stderr.write("Can't write file named : %s\n" % fitabfile)
class Atom(object):
"""Atom properties needed for the calculation"""
def __init__(self, x, y, z, fi):
self.x = x
self.y = y
self.z = z
self.fi = fi
class Molecule(object):
"""Main class of pyMLP"""
def __init__(self, verbose=False):
self.verbose = verbose
self.name = 'molecule'
self.data = None
self.pot = None
self.griddim = None
self.gridcoord = None
self.spacing = None
def parsepdb(self, pdbfile, checkforfi=False):
"""Parsing a PDB (Protein Data Bank) formated file"""
if pdbfile[-4:].lower()=='.pdb':
self.name = pdbfile[0:-4]
else:
self.name = pdbfile
try:
pdbtext = open(pdbfile)
except IOError:
if self.verbose: sys.stderr.write("Can't open %s ...\n" % pdbfile)
sys.exit()
self.data=[]
for line in pdbtext:
recordname=line[:6].strip()
if recordname in ['MODEL', 'ENDMDL', 'TER', 'END']:
atmnumber, atmname, altloc, resname = None, None, None, None
chainid, resseq, icode = None, None, None
atmx, atmy, atmz = None, None, None
occupancy, tempfactor = None, None
fi = None
comments=line[6:]
elif recordname in ['ATOM', 'HETATM']:
try:
atmnumber = int(line[6:11].strip())
atmname = line[12:16].strip()
altloc = line[16].strip()
resname = line[17:20].strip()
chainid = line[21].strip()
resseq = int(line[22:26].strip())
icode = line[26].strip()
atmx = float(line[30:38].strip())
atmy = float(line[38:46].strip())
atmz = float(line[46:54].strip())
occupancy = float(line[54:60].strip())
tempfactor = float(line[60:66].strip())
if checkforfi:
fi = float(line[66:72].strip())
comments = line[72:]
else:
fi = None
comments = line[66:]
except ValueError:
if self.verbose: sys.stderr.write(
"%s might not respect PDB standards\nor the fi "
"parameters could have been misplaced\n" % pdbfile)
continue
else:
continue
pdbline={'recordname': recordname,
'atmnumber': atmnumber,
'atmname': atmname,
'altloc': altloc,
'resname': resname,
'chainid': chainid,
'resseq': resseq,
'icode': icode,
'atmx': atmx,
'atmy': atmy,
'atmz': atmz,
'occupancy': occupancy,
'tempfactor': tempfactor,
'fi': fi,
'comments': comments}
self.data.append(pdbline)
if self.verbose: sys.stdout.write(
"\n%s was parsed ... %i lines were taken into "
"account\n" % (pdbfile, len(self.data)))
def assignfi(self, fidata):
"""assign fi parameters to each atom in the pdbfile"""
for line in self.data:
if line['resname'] in fidata:
if not line['fi']:
try:
fi=float(fidata[line['resname']][line['atmname']])
line['fi']=fi
except KeyError:
if self.verbose: sys.stderr.write(
"Atom Number %s is not defined in \nthe fi "
"parameters (might be an H)\n" % line['atmnumber'])
continue
def writefipdb(self,fipdbfile):
"""write a fipdb file containing the data for the pdbfile and the fi
parameters"""
try:
            fipdbf = open(fipdbfile, 'w')
except IOError:
if self.verbose: sys.stderr.write(
"I am having difficulties writing on %s" % fipdbfile)
for d in self.data:
if d['fi']:
header='%-6s%5i %-3s%1s%3s %1s%4i%1s ' % (
d['recordname'], d['atmnumber'], d['atmname'], d['altloc'],
d['resname'], d['chainid'], d['resseq'], d['icode'])
coord='%8.3f%8.3f%8.3f' % (d['atmx'], d['atmy'], d['atmz'])
fi='%6.2f \n' % (d['fi'])
fipdbf.write(header+coord+fi)
        if self.verbose: sys.stdout.write('%s was written' % fipdbfile)
def _griddimcalc(self, listcoord, spacing, gridmargin):
"""Determination of the grid dimension"""
coordmin = min(listcoord) - gridmargin
coordmax = max(listcoord) + gridmargin
adjustment = ((spacing - (coordmax - coordmin)) % spacing) / 2.
coordmin = coordmin - adjustment
coordmax = coordmax + adjustment
ngrid = int(round((coordmax - coordmin) / spacing))
return coordmin, coordmax, ngrid
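    # Each method below evaluates the lipophilic potential at one grid point as
    # sum_i 100 * fi_i * g(d_i), where d_i is the distance from the grid point to
    # atom i and g is the method-specific distance weighting (hyperbolic,
    # exponential, Brasseur's exp(-d/3.1), power law, or exp(-sqrt(d))).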
def _dubost(self, fi, d, n):
return (100 * fi / (1 + d)).sum()
def _fauchere(self, fi, d, n):
return (100 * fi * numpy.exp(-d)).sum()
def _brasseur(self, fi, d, n):
#3.1 division is there to remove any units in the equation
#3.1A is the average diameter of a water molecule (2.82 -> 3.2)
return (100 * fi * numpy.exp(-d/3.1)).sum()
def _buckingham(self, fi, d, n):
return (100 * fi / (d**n)).sum()
def _type5(self, fi, d, n):
return (100 * fi * numpy.exp(-numpy.sqrt(d))).sum()
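    # Worked example (illustrative values): for two atoms with hypothetical
    # atomic lipophilicity contributions fi = [0.31, -0.67] at distances
    # d = [2.0, 3.5] angstroms from a grid point, the Fauchere form gives
    #   (100 * numpy.array([0.31, -0.67]) * numpy.exp(-[2.0, 3.5])).sum()
    # which is approximately 4.20 - 2.02 = 2.17; the other methods apply the
    # same fi values with a different distance weighting.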
def calculatefimap(self, method, spacing, nexp):
"""Calculation loop"""
atoms=[]
for d in self.data:
if d['fi']:
atoms.append(Atom(d['atmx'], d['atmy'], d['atmz'], d['fi']))
#grid settings in angstrom
gridmargin = Defaults().gridmargin
xmingrid, xmaxgrid, nxgrid = self._griddimcalc([a.x for a in atoms],
spacing, gridmargin)
ymingrid, ymaxgrid, nygrid = self._griddimcalc([a.y for a in atoms],
spacing, gridmargin)
zmingrid, zmaxgrid, nzgrid = self._griddimcalc([a.z for a in atoms],
spacing, gridmargin)
self.spacing = spacing
self.griddim = (nxgrid+1, nygrid+1, nzgrid+1)
self.gridcoord = [[xmingrid, xmaxgrid],
[ymingrid, ymaxgrid],
[zmingrid, zmaxgrid]]
if self.verbose: sys.stdout.write(
"\nGrid dimension (angstroms):\n"
"coord : min max ngrid\n"
" x : %8.4f %8.4f %8i\n"
" y : %8.4f %8.4f %8i\n"
" z : %8.4f %8.4f %8i\n\n" %(xmingrid, xmaxgrid, nxgrid,
ymingrid, ymaxgrid, nygrid, zmingrid, zmaxgrid, nzgrid))
coordatms = numpy.zeros((len(atoms),3),float)
fiatms = numpy.zeros((len(atoms)),float)
for p in range(len(atoms)):
coordatms[p] = [atoms[p].x, atoms[p].y, atoms[p].z]
fiatms[p] = atoms[p].fi
self.pot = numpy.zeros((nxgrid+1, nygrid+1, nzgrid+1), float)
gridsize = (nxgrid+1) * (nygrid+1) * (nzgrid+1)
coordgridpts = numpy.zeros((nxgrid+1, nygrid+1, nzgrid+1,3), float)
for i in range(nxgrid+1):
for j in range(nygrid+1):
for k in range(nzgrid+1):
xgrid = xmingrid + i * spacing
ygrid = ymingrid + j * spacing
zgrid = zmingrid + k * spacing
coordgridpts[i,j,k]=[xgrid, ygrid, zgrid]
if self.verbose:
sys.stdout.write('\nGrid Points Coordinates evaluated\n\n')
if method == 'dubost':
computemethod = self._dubost
elif method == 'fauchere':
computemethod = self._fauchere
elif method == 'brasseur':
computemethod = self._brasseur
elif method == 'buckingham':
computemethod = self._buckingham
elif method == 'type5':
computemethod = self._type5
else:
sys.stderr.write('You should never come here !\n')
counter = 0.
for i in range(nxgrid+1):
for j in range(nygrid+1):
for k in range(nzgrid+1):
                    #Evaluation of the distance between the grid point and each atom
dist = numpy.sqrt(((coordgridpts[i,j,k,]
- coordatms[:,])**2).sum(1))
self.pot[i,j,k] = computemethod(fiatms, dist, nexp)
counter += 1.
if self.verbose:
sys.stdout.write('\rCalculation in progress :'
' %8.2f%%' % (counter*100/((nxgrid+1)*(nygrid+1))))
if self.verbose:
sys.stdout.write('\n\nMLPmin = %8.3f | MLPmax = %8.3f | '
'MLPmean = %8.3f\n\n' % (self.pot.min(),
self.pot.max(), self.pot.mean()))
def writedxfile(self, dxfile):
"""Write a dx (openDX) file"""
if not self.pot.any():
sys.stderr.write('\nNo Data to write !\n\n')
return
try:
            dxf = open(dxfile, 'w')
dxf.write('#pyMLP output file\n'
'# \n'
'#A computer once beat me at chess, \n'
'#but it was no match for me at kick boxing.\n'
'# \n')
dxf.write('object 1 class gridpositions counts '
'%i %i %i\n' % self.griddim)
gridmin = tuple([xyzmin[0] for xyzmin in self.gridcoord])
dxf.write('origin %8.6e %8.6e %8.6e\n' % gridmin)
dxf.write('delta %8.6e %8.6e %8.6e\n' % (self.spacing, 0., 0.))
dxf.write('delta %8.6e %8.6e %8.6e\n' % (0., self.spacing, 0.))
dxf.write('delta %8.6e %8.6e %8.6e\n' % (0., 0., self.spacing))
dxf.write('object 2 class gridconnections counts '
'%i %i %i\n' % self.griddim)
nbtot = self.griddim[0]*self.griddim[1]*self.griddim[2]
dxf.write('object 3 class array type double rank 0 items'
' %i data follows\n' % nbtot)
self.pot = self.pot.reshape(nbtot)
for m in range(0, nbtot-nbtot%3, 3):
val = tuple(self.pot[m:m+3])
dxf.write('%8.6e %8.6e %8.6e\n' % val)
if 0 < nbtot%3 < 3:
for m in self.pot[nbtot-nbtot%3:nbtot]:
dxf.write('%8.6e ' % m)
dxf.write('\n')
dxf.write('attribute "dep" string "positions"\n'
'object "regular positions regular connections" '
'class field\n'
'component "positions" value 1\n'
'component "connections" value 2\n'
'component "data" value 3\n')
except IOError:
            sys.stderr.write('\nI tried to prevent it ... but writing the .dx '
                'file was not possible !')
if self.verbose:
sys.stdout.write('\nMolecular Lipophilic Potential Map '
'saved in %s\n\nBye ...\n\n' % dxfile)
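    # Illustrative output sketch (hypothetical 3x4x5 grid, 1 A spacing,
    # origin at 0,0,0); the header written above would then start with:
    #   object 1 class gridpositions counts 3 4 5
    #   origin 0.000000e+00 0.000000e+00 0.000000e+00
    #   delta 1.000000e+00 0.000000e+00 0.000000e+00
    #   delta 0.000000e+00 1.000000e+00 0.000000e+00
    #   delta 0.000000e+00 0.000000e+00 1.000000e+00
    #   object 2 class gridconnections counts 3 4 5
    #   object 3 class array type double rank 0 items 60 data follows
    # followed by the 60 potential values, written three per line.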
def main():
"""pyMLP main function"""
p = _CLIparsing() #parsing the command line and getting options in p
defaults = Defaults()
fidata = defaults.fidatadefault
if p.fitabfile:
try: #import fidata if requested
import imp
fitabf = imp.load_source('fitabf', p.fitabfile)
fidata = fitabf.fidata
if p.verbose: sys.stdout.write("%s is compiled for internal use "
"as %sc\n" % (p.fitabfile, p.fitabfile))
except IOError: #export fidata if requested
if p.verbose: sys.stderr.write("Can't open %s ... "
"using default values and creating a template\n" % p.fitabfile)
writefitab(fidata, p.fitabfile, verbose=p.verbose)
molec = Molecule(verbose=p.verbose)
if p.pdbfile:
if os.path.isfile(p.pdbfile):
molec.parsepdb(p.pdbfile)
molec.assignfi(fidata)
if p.fipdbfile:
if os.path.isfile(p.fipdbfile):
if p.verbose: sys.stderr.write('%s already exists '
'pyMLP will not overwrite it\n' % p.fipdbfile)
pass
else:
molec.writefipdb(p.fipdbfile)
else:
if p.verbose: sys.stderr.write("Can't open %s ...\n" % p.pdbfile)
sys.exit()
elif p.fipdbfile:
if os.path.isfile(p.fipdbfile):
molec.parsepdb(p.fipdbfile, checkforfi=True)
else:
if p.verbose: sys.stderr.write("Can't open %s ...\n" % p.fipdbfile)
sys.exit()
else:
sys.stderr.write('You should never come here !\n')
if p.method != 'none':
molec.calculatefimap(p.method, p.spacing, p.nexp)
if p.dxfile:
pass
else:
p.dxfile = molec.name+'.dx'
if os.path.isfile(p.dxfile):
timestamp = time.strftime('%Y-%m-%d_%H%M%S')
bckpdxfile = p.dxfile+'.bckp_'+timestamp
shutil.copy(p.dxfile, bckpdxfile)
if p.verbose: sys.stdout.write('Old %s was backed up as %s\n' % (
p.dxfile, bckpdxfile))
molec.writedxfile(p.dxfile)
else:
if p.verbose:
sys.stdout.write("pyMLP didn't calculate anything\n\n")
if __name__ == '__main__':
sys.exit(main())
|
py | b40e3c8d8f4c55ab4a703f9696ad1e51c604fa57 | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_NV_viewport_array2'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_NV_viewport_array2',error_checker=_errors._error_checker)
|
py | b40e3d37a522141330e319b940f2b9abc968c444 | import os
class Program():
def __init__(self):
self.content = ""
self.in_text = [0]
self.out_text = -2
def add_in(self, index):
if index not in self.in_text:
self.in_text.append(index)
def add_content(self, line):
self.content+=line.replace('HOOKED',"LUMA")
def set_out(self, index):
self.out_text = index
class Parser():
def __init__(self, algo):
self.texture_name_list = ["HOOKED", "LUMA"]
self.prog_list = []
self.double = False
self.highQ = False
self.copy = False
with open(os.path.dirname(os.path.realpath(__file__)) + '/' + algo, 'r') as fp:
new_prog = False
comment = False
program = None
for line in fp:
if comment:
if"*/" in line: comment = False
continue
if "/*" in line:
comment = True
continue
                # this does not really work in every case
if line[:3] == "//!":
tex_name = line.split()[1]
if not new_prog:
new_prog=True
if program != None: self.prog_list.append(program)
program = Program()
if line[3:7] == "BIND":
program.add_in(self.texture_name_list.index(tex_name))
elif line[3:7] == "HEIG":
self.double = True
elif line[3:7] == "QUAL":
self.highQ = True
elif line[3:7] == "COPY":
self.copy = True
elif line[3:7] == "SAVE":
if tex_name not in self.texture_name_list:
self.texture_name_list.append(tex_name)
program.set_out(self.texture_name_list.index(tex_name))
else:
new_prog = False
if program: program.add_content(line)
self.prog_list.append(program)
for a in self.prog_list:
a.in_text = list(set([k-1 if k>1 else 0 for k in a.in_text]))
a.out_text = a.out_text-2
self.texture_name_list = self.texture_name_list[1:]
self.prog_list[-1].set_out(len(self.texture_name_list)-1)
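        # Illustrative input sketch (assumed shader-hook layout; only the four
        # characters after "//!" are compared above):
        #   //!BIND HOOKED
        #   //!SAVE LOWRES
        #   ...GLSL body; occurrences of HOOKED are rewritten to LUMA...
        # "//!BIND name" wires an already-known texture into the program,
        # "//!SAVE name" registers the program's output texture, and lines
        # beginning "//!HEIG...", "//!QUAL..." or "//!COPY..." set the
        # double / highQ / copy flags for the whole file.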
|
py | b40e3ea53bbba0daa3a933b89327f5785768fb4b | from collections import OrderedDict
import os.path
import shutil
import pytest
from edalize import get_edatool
tests_dir = os.path.dirname(__file__)
class TestFixture:
"""A fixture that makes an edalize backend with work_root directory
Create this object using the make_edalize_test factory fixture. This passes
through its `tool_name` and sets up a temporary directory for `work_root`,
then passes its keyword arguments through to the TestFixture initializer.
Args:
tool_name: The name of the tool
work_root: The directory to treat as a work root
test_name: The name to call the backend. Defaults to
`'test_<tool_name>_0'`
param_types: A list of parameter types. Defaults to `['plusarg',
'vlogdefine', 'vlogparam']` (the parameter types supported
by most simulators).
files: A list of files to use. Defaults to `None`, which means to use
:py:data:`FILES`.
tool_options: Dictionary passed to _setup_backend. Defaults to `{}`.
ref_dir: A reference directory relative to `test_<tool_name>`. Defaults
to `'.'`
use_vpi: If true, set up backend with definitions from :attr:`VPI`.
Defaults to `False`.
"""
def __init__(
self,
tool_name,
work_root,
test_name=None,
param_types=["plusarg", "vlogdefine", "vlogparam"],
files=None,
tool_options={},
ref_dir=".",
use_vpi=False,
toplevel="top_module",
):
raw_ref_dir = os.path.join(tests_dir, "test_" + tool_name, ref_dir)
self.test_name = (
"test_{}_0".format(tool_name) if test_name is None else test_name
)
self.ref_dir = os.path.normpath(raw_ref_dir)
self.work_root = work_root
self.backend = _setup_backend(
self.test_name,
tool_name,
param_types,
files,
tool_options,
work_root,
use_vpi,
toplevel,
)
def compare_files(self, files, ref_subdir="."):
"""Check some files in the work root match those in the ref directory
The files argument gives the list of files to check. These are
interpreted as paths relative to the work directory and relative to
self.ref_dir / ref_subdir.
This is a wrapper around edalize_common.compare_files: see its
documentation for how to use the :envvar:`GOLDEN_RUN` environment
variable to copy across a golden reference.
"""
ref_dir = os.path.normpath(os.path.join(self.ref_dir, ref_subdir))
return compare_files(ref_dir, self.work_root, files)
def copy_to_work_root(self, path):
shutil.copy(
os.path.join(self.ref_dir, path), os.path.join(self.work_root, path)
)
@pytest.fixture
def make_edalize_test(monkeypatch, tmpdir):
"""A factory fixture to make an edalize backend with work_root directory
The returned factory method takes a `tool_name` (the name of the tool) and
the keyword arguments supported by :class:`TestFixture`. It returns a
:class:`TestFixture` object, whose `work_root` is a temporary directory.
"""
# Prepend directory `mock_commands` to PATH environment variable
monkeypatch.setenv("PATH", os.path.join(tests_dir, "mock_commands"), ":")
created = []
def _fun(tool_name, **kwargs):
work_root = tmpdir / str(len(created))
work_root.mkdir()
fixture = TestFixture(tool_name, str(work_root), **kwargs)
created.append(fixture)
return fixture
return _fun
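# Illustrative usage sketch (hypothetical test; the tool name and the
# generated file names below are assumptions, not part of this helper module):
#
#   def test_some_tool(make_edalize_test):
#       tf = make_edalize_test("icarus", param_types=["vlogparam"])
#       tf.backend.configure()
#       tf.compare_files(["Makefile", tf.test_name + ".scr"])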
def compare_files(ref_dir, work_root, files):
"""Check that all *files* in *work_root* match those in *ref_dir*.
If the environment variable :envvar:`GOLDEN_RUN` is set, the *files* in
*work_root* are copied to *ref_dir* to become the new reference.
"""
for f in files:
reference_file = os.path.join(ref_dir, f)
generated_file = os.path.join(work_root, f)
assert os.path.exists(generated_file)
if "GOLDEN_RUN" in os.environ:
shutil.copy(generated_file, reference_file)
with open(reference_file) as fref, open(generated_file) as fgen:
assert fref.read() == fgen.read(), f
def param_gen(paramtypes):
"""Generate dictionary of definitions in *paramtypes* list."""
defs = OrderedDict()
for paramtype in paramtypes:
for datatype in ["bool", "int", "str"]:
if datatype == "int":
default = 42
elif datatype == "str":
default = "hello"
else:
default = True
defs[paramtype + "_" + datatype] = {
"datatype": datatype,
"default": default,
"description": "",
"paramtype": paramtype,
}
return defs
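# Example: param_gen(["vlogparam"]) returns an OrderedDict with the keys
# "vlogparam_bool", "vlogparam_int" and "vlogparam_str", whose defaults are
# True, 42 and "hello" respectively (all carrying paramtype "vlogparam").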
def _setup_backend(
name, tool, paramtypes, files, tool_options, work_root, use_vpi, toplevel
):
"""Set up a backend.
The backend is called *name*, is set up for *tool* with *tool_options*,
*paramtypes*, and, if *use_vpi* is ``True``, definitions from :attr:`VPI`.
If *files* is None, files are taken from :attr:`FILES`.
"""
parameters = param_gen(paramtypes)
_vpi = []
if use_vpi:
_vpi = VPI
for v in VPI:
for f in v["src_files"]:
_f = os.path.join(work_root, f)
if not os.path.exists(os.path.dirname(_f)):
os.makedirs(os.path.dirname(_f))
with open(_f, "a"):
os.utime(_f, None)
edam = {
"name": name,
"files": FILES if files is None else files,
"parameters": parameters,
"tool_options": {tool: tool_options},
"toplevel": toplevel,
"vpi": _vpi,
}
return get_edatool(tool)(edam=edam, work_root=work_root)
FILES = [
{"name": "qip_file.qip", "file_type": "QIP"},
{"name": "qsys_file", "file_type": "QSYS"},
{"name": "sdc_file", "file_type": "SDC"},
{"name": "bmm_file", "file_type": "BMM"},
{"name": "sv_file.sv", "file_type": "systemVerilogSource"},
{"name": "pcf_file.pcf", "file_type": "PCF"},
{"name": "ucf_file.ucf", "file_type": "UCF"},
{"name": "user_file", "file_type": "user"},
{"name": "tcl_file.tcl", "file_type": "tclSource"},
{"name": "waiver_file.waiver", "file_type": "waiver"},
{"name": "vlog_file.v", "file_type": "verilogSource"},
{"name": "vlog05_file.v", "file_type": "verilogSource-2005"},
{"name": "vlog_incfile", "file_type": "verilogSource", "is_include_file": True},
{"name": "vhdl_file.vhd", "file_type": "vhdlSource"},
{"name": "vhdl_lfile", "file_type": "vhdlSource", "logical_name": "libx"},
{"name": "vhdl2008_file", "file_type": "vhdlSource-2008"},
{"name": "xci_file.xci", "file_type": "xci"},
{"name": "xdc_file.xdc", "file_type": "xdc"},
{"name": "bootrom.mem", "file_type": "mem"},
{"name": "c_file.c", "file_type": "cSource"},
{"name": "cpp_file.cpp", "file_type": "cppSource"},
{"name": "c_header.h", "file_type": "cSource", "is_include_file": True},
{"name": "c_header.h", "file_type": "cppSource", "is_include_file": True},
{"name": "config.vbl", "file_type": "veribleLintRules"},
{"name": "verible_waiver.vbw", "file_type": "veribleLintWaiver"},
{"name": "verible_waiver2.vbw", "file_type": "veribleLintWaiver"},
{"name": "config.sby.j2", "file_type": "sbyConfigTemplate"},
{"name": "another_sv_file.sv", "file_type": "systemVerilogSource"},
{"name": "pdc_constraint_file.pdc", "file_type": "PDC"},
{"name": "pdc_floorplan_constraint_file.pdc", "file_type": "FPPDC"},
{"name": "lpf_file.lpf", "file_type": "LPF"},
]
"""Files of all supported file types."""
VPI = [
{
"src_files": ["src/vpi_1/f1", "src/vpi_1/f3"],
"include_dirs": ["src/vpi_1/"],
"libs": ["some_lib"],
"name": "vpi1",
},
{"src_files": ["src/vpi_2/f4"], "include_dirs": [], "libs": [], "name": "vpi2"},
]
"""Predefined VPI modules to build."""
|
py | b40e3f3435f90430e0e0ea74c75b4ed9fe3ff135 | #!/usr/bin/env python3
import torch
from torch.autograd import Function, Variable
from torch.nn.parameter import Parameter
import torch.optim as optim
import numpy as np
import numpy.random as npr
from mpc import mpc
from mpc.mpc import GradMethods, QuadCost, LinDx
import sys
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
color_scheme='Linux', call_pdb=1)
import time
import os
import shutil
import pickle as pkl
import collections
import argparse
import setproctitle
# import setGPU
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--n_state', type=int, default=3)
parser.add_argument('--n_ctrl', type=int, default=3)
parser.add_argument('--T', type=int, default=5)
parser.add_argument('--save', type=str)
parser.add_argument('--work', type=str, default='work')
parser.add_argument('--no-cuda', action='store_true')
parser.add_argument('--seed', type=int, default=0)
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
t = '.'.join(["{}={}".format(x, getattr(args, x))
for x in ['n_state', 'n_ctrl', 'T']])
setproctitle.setproctitle('bamos.lqr.'+t+'.{}'.format(args.seed))
if args.save is None:
args.save = os.path.join(args.work, t, str(args.seed))
if os.path.exists(args.save):
shutil.rmtree(args.save)
os.makedirs(args.save, exist_ok=True)
device = 'cuda' if args.cuda else 'cpu'
n_state, n_ctrl = args.n_state, args.n_ctrl
n_sc = n_state+n_ctrl
expert_seed = 42
assert expert_seed != args.seed
torch.manual_seed(expert_seed)
Q = torch.eye(n_sc)
p = torch.randn(n_sc)
alpha = 0.2
expert = dict(
Q = torch.eye(n_sc).to(device),
p = torch.randn(n_sc).to(device),
A = (torch.eye(n_state) + alpha*torch.randn(n_state, n_state)).to(device),
B = torch.randn(n_state, n_ctrl).to(device),
)
fname = os.path.join(args.save, 'expert.pkl')
with open(fname, 'wb') as f:
pkl.dump(expert, f)
torch.manual_seed(args.seed)
A = (torch.eye(n_state) + alpha*torch.randn(n_state, n_state))\
.to(device).requires_grad_()
B = torch.randn(n_state, n_ctrl).to(device).requires_grad_()
# u_lower, u_upper = -10., 10.
u_lower, u_upper = None, None
delta = u_init = None
fname = os.path.join(args.save, 'losses.csv')
loss_f = open(fname, 'w')
loss_f.write('im_loss,mse\n')
loss_f.flush()
def get_loss(x_init, _A, _B):
F = torch.cat((expert['A'], expert['B']), dim=1) \
.unsqueeze(0).unsqueeze(0).repeat(args.T, n_batch, 1, 1)
x_true, u_true, objs_true = mpc.MPC(
n_state, n_ctrl, args.T,
u_lower=u_lower, u_upper=u_upper, u_init=u_init,
lqr_iter=100,
verbose=-1,
exit_unconverged=False,
detach_unconverged=False,
n_batch=n_batch,
)(x_init, QuadCost(expert['Q'], expert['p']), LinDx(F))
F = torch.cat((_A, _B), dim=1) \
.unsqueeze(0).unsqueeze(0).repeat(args.T, n_batch, 1, 1)
x_pred, u_pred, objs_pred = mpc.MPC(
n_state, n_ctrl, args.T,
u_lower=u_lower, u_upper=u_upper, u_init=u_init,
lqr_iter=100,
verbose=-1,
exit_unconverged=False,
detach_unconverged=False,
n_batch=n_batch,
)(x_init, QuadCost(expert['Q'], expert['p']), LinDx(F))
traj_loss = torch.mean((u_true - u_pred)**2) + \
torch.mean((x_true - x_pred)**2)
return traj_loss
opt = optim.RMSprop((A, B), lr=1e-2)
n_batch = 128
for i in range(5000):
x_init = torch.randn(n_batch,n_state).to(device)
traj_loss = get_loss(x_init, A, B)
opt.zero_grad()
traj_loss.backward()
opt.step()
model_loss = torch.mean((A-expert['A'])**2) + \
torch.mean((B-expert['B'])**2)
loss_f.write('{},{}\n'.format(traj_loss.item(), model_loss.item()))
loss_f.flush()
plot_interval = 100
if i % plot_interval == 0:
os.system('./plot.py "{}" &'.format(args.save))
print(A, expert['A'])
print('{:04d}: traj_loss: {:.4f} model_loss: {:.4f}'.format(
i, traj_loss.item(), model_loss.item()))
# except KeyboardInterrupt: TODO
# raise
# except Exception as e:
# # print(e)
# # pass
# raise
if __name__=='__main__':
main()
|
py | b40e3f7adf55275f79fd337416fad530d8ab145d | import csv
from PyQt5 import QtWidgets
from pyqtgraph import PlotWidget, plot
from PyQt5.QtGui import QColor, QPen
import pyqtgraph as pg
import sys # We need sys so that we can pass argv to QApplication
import os
filename = "btc.csv"
x = [i for i in range(2, 4539)]
y = []
with open(filename, 'r') as csvfile:
next(csvfile)
for elmt in csv.reader(csvfile):
if elmt[68] == "":
y.append(float(0))
else:
y.append(float(elmt[68]))
xN = [i for i in range(2, 4538)]
dydx = []
for i in range(len(y) - 1):
dydx.append((y[i + 1] - y[i])/(x[i + 1] - x[i]))
class MainWindow(QtWidgets.QMainWindow):
def __init__(self, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
self.graphWidget = pg.PlotWidget()
self.setCentralWidget(self.graphWidget)
# plot data: x, y values
self.graphWidget.setBackground('w')
self.graphWidget.plot(x, y, pen=QPen(QColor(0, 0, 0)))
self.graphWidget.plot(xN, dydx, pen=QPen(QColor(0, 0, 255)))
def main():
app = QtWidgets.QApplication(sys.argv)
main = MainWindow()
main.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
py | b40e3f9159ba088cde15b527feb67c22b0ade4b8 | # python3
# Copyright 2019 The gVisor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ABSL build benchmark."""
import re
SAMPLE_BAZEL_OUTPUT = """Extracting Bazel installation...
Starting local Bazel server and connecting to it...
Loading:
Loading: 0 packages loaded
Loading: 0 packages loaded
currently loading: absl/algorithm ... (11 packages)
Analyzing: 241 targets (16 packages loaded, 0 targets configured)
Analyzing: 241 targets (21 packages loaded, 617 targets configured)
Analyzing: 241 targets (27 packages loaded, 687 targets configured)
Analyzing: 241 targets (32 packages loaded, 1105 targets configured)
Analyzing: 241 targets (32 packages loaded, 1294 targets configured)
Analyzing: 241 targets (35 packages loaded, 1575 targets configured)
Analyzing: 241 targets (35 packages loaded, 1575 targets configured)
Analyzing: 241 targets (36 packages loaded, 1603 targets configured)
Analyzing: 241 targets (36 packages loaded, 1603 targets configured)
INFO: Analyzed 241 targets (37 packages loaded, 1864 targets configured).
INFO: Found 241 targets...
[0 / 5] [Prepa] BazelWorkspaceStatusAction stable-status.txt
[16 / 50] [Analy] Compiling absl/base/dynamic_annotations.cc ... (20 actions, 10 running)
[60 / 77] Compiling external/com_google_googletest/googletest/src/gtest.cc; 5s processwrapper-sandbox ... (12 actions, 11 running)
[158 / 174] Compiling absl/container/internal/raw_hash_set_test.cc; 2s processwrapper-sandbox ... (12 actions, 11 running)
[278 / 302] Compiling absl/container/internal/raw_hash_set_test.cc; 6s processwrapper-sandbox ... (12 actions, 11 running)
[384 / 406] Compiling absl/container/internal/raw_hash_set_test.cc; 10s processwrapper-sandbox ... (12 actions, 11 running)
[581 / 604] Compiling absl/container/flat_hash_set_test.cc; 11s processwrapper-sandbox ... (12 actions, 11 running)
[722 / 745] Compiling absl/container/node_hash_set_test.cc; 9s processwrapper-sandbox ... (12 actions, 11 running)
[846 / 867] Compiling absl/hash/hash_test.cc; 11s processwrapper-sandbox ... (12 actions, 11 running)
INFO: From Compiling absl/debugging/symbolize_test.cc:
/tmp/cclCVipU.s: Assembler messages:
/tmp/cclCVipU.s:1662: Warning: ignoring changed section attributes for .text
[999 / 1,022] Compiling absl/hash/hash_test.cc; 19s processwrapper-sandbox ... (12 actions, 11 running)
[1,082 / 1,084] Compiling absl/container/flat_hash_map_test.cc; 7s processwrapper-sandbox
INFO: Elapsed time: 81.861s, Critical Path: 23.81s
INFO: 515 processes: 515 processwrapper-sandbox.
INFO: Build completed successfully, 1084 total actions
INFO: Build completed successfully, 1084 total actions"""
def sample():
return SAMPLE_BAZEL_OUTPUT
# pylint: disable=unused-argument
def elapsed_time(data: str, **kwargs) -> float:
"""Returns the elapsed time for running an absl build."""
return float(re.compile(r"Elapsed time: (\d*.?\d*)s").search(data).group(1))
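# Example, derived from SAMPLE_BAZEL_OUTPUT above:
#
#   elapsed_time(sample())  # -> 81.861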
|
py | b40e406b4dc0bbd3ab0d2f3afc5be2b5b845e5ef | """
Django settings for digital_handwriting project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATES_DIR=os.path.join(BASE_DIR,'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+%apmq2_-amk9pus39&90nl1%wdel*bzf81(m#pp6j595o2m%^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'handwriting',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'digital_handwriting.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATES_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'digital_handwriting.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
py | b40e407c39a2c64089e058e73909e9479d05eac0 | import dataclasses
from typing import List
@dataclasses.dataclass
class AverageableStack:
'''
A stack of numbers with a O(1) average() operation.
inv: self._total == sum(self._values)
'''
_values: List[int]
_total: int
def __init__(self):
self._values = []
self._total = 0
def push(self, val: int):
''' post: True '''
self._values.append(val)
self._total += val
def pop(self) -> int:
'''
pre: self._values
'''
val = self._values.pop()
self._total -= val
return val
def average(self) -> float:
''' pre: self._values '''
return self._total / len(self._values)
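# Illustrative usage sketch (not part of the original example):
#
#   s = AverageableStack()
#   s.push(3)
#   s.push(5)
#   s.average()  # -> 4.0
#   s.pop()      # -> 5
#   s.average()  # -> 3.0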
|
py | b40e408b37e6dd8105bf43f4263baf3d47a2814b | from builder.laikago_task_bullet import LaikagoTaskBullet
from builder.laikago_task import InitPose
import math
import random
class LaikagoStandUpBulletBase(LaikagoTaskBullet):
def __init__(self,
reward_mode='with_shaping',
run_mode='train',
contact_buffer_length=5):
super(LaikagoStandUpBulletBase, self).__init__(run_mode=run_mode,
reward_mode=reward_mode,
init_pose=InitPose.STAND,
contact_buffer_length=contact_buffer_length)
class LaikagoStandUpBulletPush(LaikagoStandUpBulletBase):
def __init__(self,
run_mode='train',
reward_mode='with_shaping',
force=True,
max_force=300,
force_delay_steps=10):
super(LaikagoStandUpBulletPush, self).__init__(run_mode=run_mode, reward_mode=reward_mode)
self.force = force
self._get_force_ori()
self.max_force = max_force
self.force_delay_steps = force_delay_steps
self.now_force = None
return
def _get_force_ori(self):
self.force_ori = []
f_ori = [[1, 0, 0], [0, 1, 0], [-1, 0, 0], [0, -1, 0], [0, 0, 1], [0, 0, -1]]
for i in f_ori:
for j in f_ori:
ori = [o[0] + o[1] for o in zip(i, j)]
self.force_ori.append(ori)
def _give_force(self):
if self.now_force is None or self.steps % self.force_delay_steps == 0:
force_id = random.randint(0, len(self.force_ori) - 1)
ori = self.force_ori[force_id]
self.now_force = [f * random.random() * self.max_force for f in ori]
return self.now_force
def update(self):
super(LaikagoStandUpBulletPush, self).update()
if not self.force:
return
else:
force = self._give_force()
self._env.transfer.laikago.apply_force(force)
class LaikagoStandUpBullet0(LaikagoStandUpBulletBase):
def __init__(self, run_mode='train', reward_mode='with_shaping'):
super(LaikagoStandUpBullet0, self).__init__(run_mode=run_mode,
reward_mode=reward_mode)
@property
def is_healthy(self):
return not (self.done_r_bullet(threshold=10) or
self.done_p_bullet(threshold=10) or
self.done_y_bullet(threshold=10) or
self.done_height_bullet(threshold=0.25) or
self.done_region_bullet(threshold=0.5) or
self.done_toe_distance(threshold=0.1))
def cal_phi_function(self):
sum = self.reward_r_bullet(threshold=10) + self.reward_p_bullet(threshold=10) + \
self.reward_y_bullet(threshold=10) + self.reward_height_bullet(threshold=0.25) + \
self.reward_region_bullet(threshold=0.5) + self.reward_toe_distance(threshold=0.1)
return sum
def update_reward(self):
if self.is_healthy:
self.add_reward(1, 1)
class LaikagoStandUpBullet0_1(LaikagoStandUpBulletBase):
def __init__(self, run_mode='train', reward_mode='with_shaping'):
super(LaikagoStandUpBullet0_1, self).__init__(run_mode=run_mode,
reward_mode=reward_mode)
@property
def is_healthy(self):
return not (self.done_r_bullet(threshold=10) or
self.done_p_bullet(threshold=10) or
self.done_y_bullet(threshold=10) or
self.done_height_bullet(threshold=0.25) or
self.done_region_bullet(threshold=0.5) or
self.done_toe_distance(threshold=0.1))
def cal_phi_function(self):
sum = self.reward_r_bullet(threshold=5) + self.reward_p_bullet(threshold=5) + \
self.reward_y_bullet(threshold=5) + self.reward_height_bullet(threshold=0.4) + \
self.reward_region_bullet(threshold=0.1) + self.reward_toe_distance(threshold=0.1)
return sum
def update_reward(self):
if self.is_healthy:
self.add_reward(1, 1)
class LaikagoStandUpBullet1(LaikagoStandUpBulletBase):
def __init__(self, run_mode='train', reward_mode='with_shaping'):
super(LaikagoStandUpBullet1, self).__init__(run_mode=run_mode,
reward_mode=reward_mode,
contact_buffer_length=2)
@property
def is_healthy(self):
return not (self.done_r_bullet(threshold=30) or
self.done_p_bullet(threshold=30) or
self.done_height_bullet(threshold=0.25) or
self.done_toe_distance(threshold=0.1) or
self.done_toe_contact_long(threshold=7))
def cal_phi_function(self):
sum = self.reward_r_bullet(threshold=30) + self.reward_p_bullet(threshold=30) + \
self.reward_height_bullet(threshold=0.25) + \
self.reward_toe_distance(threshold=0.1) + \
self.reward_toe_contact_long(threshold=7)
return sum
def update_reward(self):
if self.is_healthy:
self.add_reward(self.reward_energy(), 1)
class LaikagoStandUpBullet2(LaikagoStandUpBulletBase):
def __init__(self, run_mode='train', reward_mode='with_shaping'):
super(LaikagoStandUpBullet2, self).__init__(run_mode=run_mode,
reward_mode=reward_mode,
contact_buffer_length=3)
@property
def is_healthy(self):
return not (self.done_r_bullet(threshold=30) or
self.done_p_bullet(threshold=30) or
self.done_height_bullet(threshold=0.25) or
self.done_toe_distance(threshold=0.1) or
self.done_toe_contact_long(threshold=9))
def cal_phi_function(self):
sum = self.reward_r_bullet(threshold=30) + self.reward_p_bullet(threshold=30) + \
self.reward_height_bullet(threshold=0.25) + \
self.reward_toe_distance(threshold=0.1) + \
self.reward_toe_contact_long(threshold=9)
return sum
def update_reward(self):
if self.is_healthy:
self.add_reward(self.reward_energy(), 1) |
py | b40e40e3f20b0f503977d18c00835550f443be5f | #!/usr/bin/env python
import sys
import hub.lib.config as config
from hub.lib.client import Client
if __name__ == '__main__':
with open(sys.argv[1]) as f:
job = f.read()
client = Client('localhost')
response = client.create(job)
    print('Successfully submitted job with job id: %s' % response)
    print('And body:')
    print(job)
|
py | b40e4114947de8ec9e78b3683b602a3c830cf1bc | import numpy as np
from resnet3d import Resnet3DBuilder
# pseudo volumetric data
X_train = np.random.rand(10, 64, 64, 32, 1)
labels = np.random.randint(0, 2, size=[10])
y_train = np.eye(2)[labels]
# train
model = Resnet3DBuilder.build_resnet_50((64, 64, 32, 1), 2, multilabel=True)
model.compile(loss="categorical_crossentropy", optimizer="sgd")
model.fit(X_train, y_train, batch_size=10)
|
py | b40e4155df4948f1b1220647772e463b689c20af | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Visitor restricting traversal to only the public tensorflow API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.python.util import tf_inspect
class PublicAPIVisitor(object):
"""Visitor to use with `traverse` to visit exactly the public TF API."""
def __init__(self, visitor):
"""Constructor.
`visitor` should be a callable suitable as a visitor for `traverse`. It will
be called only for members of the public TensorFlow API.
Args:
visitor: A visitor to call for the public API.
"""
self._visitor = visitor
self._root_name = 'tf'
# Modules/classes we want to suppress entirely.
self._private_map = {
# Some implementations have this internal module that we shouldn't
# expose.
'tf.flags': ['cpp_flags'],
}
# Modules/classes we do not want to descend into if we hit them. Usually,
# system modules exposed through platforms for compatibility reasons.
# Each entry maps a module path to a name to ignore in traversal.
self._do_not_descend_map = {
'tf': [
'compiler',
'core',
'examples',
'flags', # Don't add flags
# TODO(drpng): This can be removed once sealed off.
'platform',
# TODO(drpng): This can be removed once sealed.
'pywrap_tensorflow',
# TODO(drpng): This can be removed once sealed.
'user_ops',
'python',
'tools',
'tensorboard',
],
## Everything below here is legitimate.
# It'll stay, but it's not officially part of the API.
'tf.app': ['flags'],
# Imported for compatibility between py2/3.
'tf.test': ['mock'],
}
@property
def private_map(self):
"""A map from parents to symbols that should not be included at all.
This map can be edited, but it should not be edited once traversal has
begun.
Returns:
The map marking symbols to not include.
"""
return self._private_map
@property
def do_not_descend_map(self):
"""A map from parents to symbols that should not be descended into.
This map can be edited, but it should not be edited once traversal has
begun.
Returns:
The map marking symbols to not explore.
"""
return self._do_not_descend_map
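  # Illustrative sketch (assumed caller code, not part of this module): a
  # traversal script can extend these maps before walking the API, e.g.
  #
  #   api_visitor = PublicAPIVisitor(my_visitor)
  #   api_visitor.private_map['tf.foo'] = ['internal_helper']
  #   api_visitor.do_not_descend_map['tf.bar'] = ['vendored_package']
  #   traverse(tf, api_visitor)  # `traverse` is the companion utility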
def set_root_name(self, root_name):
"""Override the default root name of 'tf'."""
self._root_name = root_name
def _is_private(self, path, name):
"""Return whether a name is private."""
# TODO(wicke): Find out what names to exclude.
return ((path in self._private_map and
name in self._private_map[path]) or
(name.startswith('_') and not re.match('__.*__$', name) or
name in ['__base__', '__class__']))
def _do_not_descend(self, path, name):
"""Safely queries if a specific fully qualified name should be excluded."""
return (path in self._do_not_descend_map and
name in self._do_not_descend_map[path])
def __call__(self, path, parent, children):
"""Visitor interface, see `traverse` for details."""
# Avoid long waits in cases of pretty unambiguous failure.
if tf_inspect.ismodule(parent) and len(path.split('.')) > 10:
raise RuntimeError('Modules nested too deep:\n%s.%s\n\nThis is likely a '
'problem with an accidental public import.' %
(self._root_name, path))
# Includes self._root_name
full_path = '.'.join([self._root_name, path]) if path else self._root_name
# Remove things that are not visible.
for name, child in list(children):
if self._is_private(full_path, name):
children.remove((name, child))
self._visitor(path, parent, children)
# Remove things that are visible, but which should not be descended into.
for name, child in list(children):
if self._do_not_descend(full_path, name):
children.remove((name, child))
|
py | b40e41f2d55255446022283e4518c55c5a62fde3 | import dataclasses
from typing import List
@dataclasses.dataclass(frozen=True)
class ImportPath:
names: List[str]
def __post_init__(self):
if any([name.find(".") >= 0 for name in self.names]):
raise ValueError(f"An invalid name specified in `{self.names}`")
def __add__(self, other: "ImportPath") -> "ImportPath":
return ImportPath(names=self.names + other.names)
def join(self, name: str) -> "ImportPath":
"""
        Append the given name to this path.
:param name:
:return:
"""
return ImportPath(names=self.names + [name])
@property
def depth(self):
return len(self.names)
def path_in_depth(self, depth: int) -> "ImportPath":
"""
        Return a new path made of the first `depth` components of this path.
:param depth:
:return:
"""
assert depth > 0
return ImportPath(names=self.names[:depth])
def belongs_to(self, other: "ImportPath") -> bool:
"""
        Return whether this path is contained in (belongs to) `other`.
:param other:
:return:
"""
if self.depth < other.depth:
return False
return all([p1 == p2 for p1, p2 in zip(self.names, other.names)])
def __str__(self) -> str:
return ".".join(self.names)
@classmethod
def from_str(cls, path: str) -> "ImportPath":
names = path.split(".")
return cls(names=names)
def match_module_names(self, module_names: List[str]) -> bool:
return any(
[self.match_module_name(module_name) for module_name in module_names]
)
def match_module_name(self, module_name: str) -> bool:
path_str = str(self)
return path_str == module_name or path_str.startswith(module_name + ".")
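    # Illustrative usage sketch (not part of the original module):
    #
    #   p = ImportPath.from_str("pkg.mod.sub")
    #   p.depth                                       # -> 3
    #   str(p.path_in_depth(2))                       # -> "pkg.mod"
    #   p.belongs_to(ImportPath.from_str("pkg.mod"))  # -> True
    #   p.match_module_name("pkg.mod")                # -> True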
|
py | b40e429a3b2b91cb1bac9410532972c35e6445b4 | from .Grid import Grid
|
py | b40e429def6fffe7199a144201d5ec898aad75f1 | # model settings
model = dict(
type="ImageClassifier",
backbone=dict(type="TIMMBackbone", model_name="efficientnetv2_m", pretrained=False),
neck=dict(type="GlobalAveragePooling"),
head=dict(
type="LinearClsHead",
num_classes=10,
in_channels=1280,
loss=dict(type="CrossEntropyLoss", loss_weight=1.0),
),
)
|
py | b40e429e5ef2e4f6d2cfbf539848285d6ef18523 | import cv2
import numpy as np
import os
from pathlib import Path
def draw_chessboard(row, col, size):
img = np.zeros([(row+1)*size, (col+1)*size])
colors = [0, 255]
for i in range(row+1):
for j in range(col+1):
img[i*size:(i+1)*size, j*size:(j+1)*size] = colors[j % 2]
colors = colors[::-1]
img = np.pad(img, ((120, 120), (150, 150)), constant_values=255)
cv2.imshow('img', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
def get_camera():
npzfile = np.load('calibrate.npz')
camera_matrix = npzfile['mtx']
file = Path(file_to_store)
if not file.is_dir():
file.mkdir(parents=True)
camera_store_path = file / 'camera.txt'
np.savetxt(str(camera_store_path), camera_matrix, fmt='%f', delimiter=' ')
def set_params(capture):
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, 1080)  # width
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 960)  # height
    capture.set(cv2.CAP_PROP_FPS, 30)  # frame rate, frames/second
    capture.set(cv2.CAP_PROP_BRIGHTNESS, -100)  # brightness
    capture.set(cv2.CAP_PROP_CONTRAST, 10)  # contrast, 40
    capture.set(cv2.CAP_PROP_SATURATION, 50)  # saturation, 50
    capture.set(cv2.CAP_PROP_HUE, 50)  # hue, 50
    capture.set(cv2.CAP_PROP_EXPOSURE, 10)  # exposure, 50; camera capture parameters
def reduce_highlights(img):
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert to grayscale first
    ret, thresh = cv2.threshold(img_gray, 200, 255, 0)  # use a threshold to pick out the highlights; anything above 200 is currently treated as highlight
contours, hierarchy = cv2.findContours(thresh.copy(),cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
img_zero = np.zeros(img.shape, dtype=np.uint8)
for contour in contours:
x, y, w, h = cv2.boundingRect(contour)
img_zero[y:y+h, x:x+w] = 255
mask = img_zero
print("Highlight part: ")
# show_img(mask)
    # alpha and beta together determine how strongly the highlight areas are blurred after removal
    # alpha: brightness scaling factor, default 0.2, range [0, 2]; the larger the value, the lower the brightness
    # beta: offset added after the brightness scaling, default 0.4, range [0, 2]; the larger the value, the lower the brightness
result = cv2.illuminationChange(img, mask, alpha=0.2, beta=0.4)
# show_img(result)
return result
def get_calib_pic(size):
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
# set_params(cap)
    count = 1  # count tracks how many frames with a successfully detected chessboard have been saved
NumberofCalibrationImages = 10
Nx_cor = size[0]
Ny_cor = size[1]
# W = 640
# H = 480 #360
# print(cap.set(cv2.CAP_PROP_FRAME_WIDTH, W))
# print(cap.set(cv2.CAP_PROP_FRAME_HEIGHT, H))
print(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
print(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
while (True):
ret, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# gray = cv2.resize(gray, (1920, 1080))
if cv2.waitKey(1) & 0xFF == ord(' '):
# Our operations on the frame come here
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findChessboardCorners(gray, (Nx_cor, Ny_cor), None) # Find the corners
# If found, add object points, image points
if ret == True:
file_path =file_to_store + '/' + 'input{}.jpg'.format(count)
cv2.imwrite(file_path, gray)
print('Num of imgs {}/{}'.format(count, NumberofCalibrationImages))
count += 1
if count > NumberofCalibrationImages:
break
else:
print('not find chessboard')
print(type(corners))
# Display the resulting frame
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
def find_chessboard(img_path, size):
assert os.path.exists(img_path)
img = cv2.imread(str(img_path))
# img = cv2.resize(img, (1920, 1080))
# img = reduce_highlights(img)
ok, corners = cv2.findChessboardCorners(img, size, None)
# show the detected corners
if ok:
for pt in corners:
point = pt[0]
cv2.circle(img, center=(int(point[0]), int(point[1])), radius=10, color=(0, 0, 255), thickness=-1)
cv2.imshow('img', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
else:
print ('cannot find chessboard points')
# select
select_input_index = [0, 50, 54]
select_model_index = [50, 0, 4]
# sort the results at the beginning of right bottom
corners = corners.reshape(corners.shape[0], corners.shape[2])
if corners[0].sum() < corners[size[0]*size[1]-1].sum():
corners = corners[::-1, :]
corners_select = corners[select_input_index, :]
img_path = Path(img_path)
file_name = img_path.stem
file_path = file_to_store + '/' + file_name + '.txt'
file_select_path = file_to_store + '/' + file_name + '_3p.txt'
if file_name == 'model':
corners = np.pad(corners, ((0, 0), (0, 1)), constant_values=0.0)
corners = corners * 0.2745
corners_select = corners[select_model_index, :]
np.savetxt(file_path, corners, fmt='%f', delimiter=' ')
np.savetxt(file_select_path, corners_select, fmt='%f', delimiter=' ')
def show_results(file_dir):
mat_list = []
for file in file_dir.iterdir():
if file.stem[0:3] == 'mat':
mat = np.loadtxt(file)
mat = mat.reshape(-1)[9:13].tolist() # show the transformation matrix
mat.append(float(file.stem[3:])) # average error
if mat[3] < 0.7:
mat_list.append(mat)
mat = np.array(mat_list)
np.set_printoptions(threshold=np.inf)
np.set_printoptions(suppress=True)
print(mat)
if __name__ == '__main__':
file_to_store = './data4'
size = (5, 11)
file_dir = Path(file_to_store)
# get intrinsic parameters from .npz file
get_camera()
# get calibrate pictures
get_calib_pic(size)
for file in file_dir.iterdir():
if file.suffix == '.jpg':
find_chessboard(file, size)
# show the results of the combined pictures
# show_results(file_dir)
|
py | b40e43d5db90d87bc2a360432c1ec8c794b6a526 | #!/usr/bin/python
##################################################################################
# VER 2.0 PR #
# #
# Permission is granted to anyone to use this software for any purpose, #
# excluding commercial applications, and to alter it and redistribute it #
# freely, subject to the following restrictions: #
# #
# 1. The origin of this software must not be misrepresented; you must not #
# claim that you wrote the original software. If you use this software #
# in a product, an acknowledgment in the product documentation is required. #
# #
# 2. Altered source versions must be plainly marked as such, and must not be #
# misrepresented as being the original software. #
# #
# 3. This notice may not be removed or altered from any source #
# distribution. #
# #
# #
# ==Created by Colton (Brandon) S. (@Coltonton) for the OpenPilot Community=== #
# === http://endoflinetech.com/eon-custom-themes === #
# #
# With a mission to rid all EONS of Comma.ai branding #
# And give the people the freedom, knowlage, and power! #
# & to make their EONS purdy! #
# #
# Grab life by the horns #
# #
# A very special thank you to @ShaneSmiskol for creating the theme picker #
# for his tireless help, and donating the life of his LeEco EON #
# to get the LeEco based EONs supported by this project #
# Although revived least we forget..... #
##################################################################################
# #
# To Restore A Theme Backup: #
# #
# SSH into your EON: #
# (https://medium.com/@jfrux/comma-eon-getting-connected-with-ssh-3ed6136e4a75) #
# #
# Type the following command if using the main project #
# exec /data/eon-custom-themes/restore_theme.py #
# #
# Or if trying to use the included package with an OP Fork: #
# cd /data/(your openpilot directory)/eon-custom-themes #
# exec ./restore_theme.py #
# #
# Now follow the prompts and make your selections! #
# Everything will be done automagically!!!!! #
# #
# Don't forget to tell your friends!! #
# Love Cole (@Coltonton) #
# #
# Did you know that if you have a custom OP fork you can use this #
# program to auto install your custom theme for your users automagiclly? #
# And incorparate it into your OP Fork? See ./developer/DEVREADME #
# #
##################################################################################
import os
import time
from os import path
from support.support_variables import BACKUPS_DIR, BACKUP_OPTIONS, CONTRIB_THEMES
from support.support_functions import get_device_theme_data, get_user_backups, is_affirmative, make_backup_folder, mark_self_installed, print_text
##======================= CODE START ================================================================
os.chdir(os.path.dirname(os.path.realpath(__file__))) # __file__ is safer since it doesn't change based on where this file is called from
print_text('restore') #Print welcome text with the flag for restore welcome text
EON_TYPE, BOOT_LOGO_THEME_PATH, BOOT_LOGO_PATH, BOOT_LOGO_NAME = get_device_theme_data() # Get Perams based off detected device
class ThemeRestorer:
def __init__(self): # Init code runs once. sets up.
self.backup_dir = make_backup_folder() # Create and get backup folder
self.theme_restore_loop() # Start main loop
def theme_restore_loop(self): # Theme_restorer!
# Backup_restore Loop
while 1:
self.selected_backup = get_user_backups(self.backup_dir)
if self.selected_backup is None:
print('Didn\'t select a backup, exiting.')
return
if self.selected_backup == 'Comma-Default':
self.restore_default_comma()
self.backup_get_available_options()
if self.backup_reinstall_function() == 'exit':
return
def backup_get_available_options(self): # Check what assets are available for the selected backup
# Check if the selected backup has a APK asset
#if os.path.exists('{}/{}/spinner'.format(BACKUPS_DIR, self.selected_backup)):
# BACKUP_OPTIONS.append('APK')
# Check if the selected backup has a boot logo asset
if os.path.exists('{}/{}/{}'.format(BACKUPS_DIR, self.selected_backup, BOOT_LOGO_NAME)):
BACKUP_OPTIONS.append('Boot Logo')
        # Check if the selected backup has a boot animation asset
if os.path.exists('{}/{}/bootanimation.zip'.format(BACKUPS_DIR, self.selected_backup)):
BACKUP_OPTIONS.append('Boot Animation')
# Check if the selected backup has a OpenPilot Spinner asset
if os.path.exists('{}/{}/spinner'.format(BACKUPS_DIR, self.selected_backup)):
BACKUP_OPTIONS.append('OpenPilot Spinner')
# Check if the selected backup has a APK asset
#if os.path.exists('{}/{}/spinner'.format(BACKUPS_DIR, self.selected_backup)):
# BACKUP_OPTIONS.append('APK')
# if os.path.exists('{}/{}/additional'.format(BACKUPS_DIR, self.selected_backup)): # todo disabled for now
# self.BACKUP_OPTIONS.append('4. Additional Resources')
BACKUP_OPTIONS.append('-Main Menu-')
BACKUP_OPTIONS.append('-Reboot-')
BACKUP_OPTIONS.append('-Quit-')
    def backup_reinstall_function(self): # Backup re-installer program, prompts user on what they want to do
while 1:
options = list(BACKUP_OPTIONS) # this only contains available options from self.get_available_options
if not len(options):
print('The selected backup has no resources available for your device! Try another.')
time.sleep(2)
return
print('What resources do you want to install for the {} backup?'.format(self.selected_backup))
for idx, theme in enumerate(options):
print('{}. {}'.format(idx + 1, theme))
indexChoice = int(input("Enter Index Value: "))
indexChoice -= 1
selected_option = BACKUP_OPTIONS[indexChoice]
# if selected_option == 'APK':
# print('Selected to install the APK backup. Continue?')
# if not is_affirmative():
# print('Not installing...')
# time.sleep(1.5)
# continue
# os.system('cp /data/openpilot/apk/ai.comma.plus.offroad.apk {}'.format(self.backup_dir)) # Make Backup
# os.system('dd if={}/{}/{} of={}'.format(BACKUPS_DIR, self.selected_backup, BOOT_LOGO_NAME, BOOT_LOGO_PATH)) # Replace
# print('\nBoot Logo re-installed successfully! Original backed up to {}'.format(self.backup_dir))
# print('Press enter to continue!')
# mark_self_installed() # Create flag in /sdcard so auto installer knows there is a self installation
# input()
# #Confirm user wants to install APK
# print('Selected to install the {} APK backup. Continue?'.format(self.selected_theme))
# if not is_affirmative():
# print('Not installing...')
# time.sleep(1.5)
# continue
# #Check if there was a backup already this session to prevent accidental overwrites
# if path.exists('{}/spinner'.format(self.backup_dir)):
# print('It appears you already made a APK install this session')
# print('continuing will overwrite the last APK backup')
# print('the program made this session already!!!')
# print('Would you like to continue and overwrite previous?')
# if not is_affirmative():
# print('Not installed, exiting session..... Please re-run program')
# exit() #Exit program if user does not want to overwrite, so they can start a new session
# else:
# os.mkdir('{}/spinner'.format(self.backup_dir))
# #Ask user if their OP directory is custom (like arnepilot / dragonpilot)
# print('Do you have an OP fork with a custom directory name? (ex. arnepilot, dragonpilot)') # Ask the user if their OP fork used a diffrent directory.
# if is_affirmative(): # Yes there is a custom OP dir
# print('What is the OP directory name? (case matters, not including /data/)')
# opdir = '/data/{}'.format(input('> ').strip('/')) # get custom dir name, strip slashes for safety
# print('Your openpilot directory is {}'.format(opdir))
# input('*** Please enter to continue, or Ctrl+C to abort if this is incorrect! ***')
# else:
# opdir = 'openpilot' #op directory is not custom so openpilot
if selected_option == 'Boot Logo':
print('Selected to install the Boot Logo backup. Continue?')
if not is_affirmative():
print('Not installing...')
time.sleep(1.5)
continue
os.system('cp {} {}'.format(BOOT_LOGO_PATH, self.backup_dir)) # Make Backup
os.system('dd if={}/{}/{} of={}'.format(BACKUPS_DIR, self.selected_backup, BOOT_LOGO_NAME, BOOT_LOGO_PATH)) # Replace
print('\nBoot Logo re-installed successfully! Original backed up to {}'.format(self.backup_dir))
print('Press enter to continue!')
mark_self_installed() # Create flag in /sdcard so auto installer knows there is a self installation
input()
elif selected_option == 'Boot Animation':
print('Selected to install the Boot Animation backup. Continue?')
if not is_affirmative():
print('Not installing...')
time.sleep(1.5)
continue
os.system('mount -o remount,rw /system') # /system read only, must mount as r/w
os.system('mv /system/media/bootanimation.zip {}/bootanimation'.format(self.backup_dir)) # backup
os.system('cp {}/{}/bootanimation/bootanimation.zip /system/media/bootanimation.zip'.format(BACKUPS_DIR, self.selected_backup)) # replace
os.system('chmod 666 /system/media/bootanimation.zip')
print('\nBoot Animation re-installed successfully! Original backed up to {}'.format(self.backup_dir))
print('Press enter to continue!')
mark_self_installed() # Create flag in /sdcard so auto installer knows there is a self installation
input()
elif selected_option == 'OpenPilot Spinner':
#Confirm user wants to install Spinner
                print('Selected to install the {} OP Spinner backup. Continue?'.format(self.selected_backup))
if not is_affirmative():
print('Not installing...')
time.sleep(1.5)
continue
##Check if there was a spinner backup already this session to prevent accidental overwrites
#Returns false if okay to proceed. Gets self.backup_dir & asset type name
if backup_overide_check(self.backup_dir, 'spinner') == True:
exit()
##Ask user if their OP directory is custom (like arnepilot / dragonpilot)
opdir = op_dir_finder()
##Backup & Copy in relevant files
# Check if backup has a spinner logo
                if path.exists('{}/{}/spinner/img_spinner_comma.png'.format(BACKUPS_DIR, self.selected_backup)): #Backup does haz
os.system('mv /data/{}/selfdrive/assets/img_spinner_comma.png {}/spinner'.format(opdir, self.backup_dir)) #Backup logo
os.system('cp {}/{}/spinner/img_spinner_comma.png /data/{}/selfdrive/assets'.format(BACKUPS_DIR, self.selected_backup, opdir)) #Replace logo
# Check if backup has a spinner track
                if path.exists('{}/{}/spinner/img_spinner_track.png'.format(BACKUPS_DIR, self.selected_backup)): #Backup does haz
                    os.system('mv /data/{}/selfdrive/assets/img_spinner_track.png {}/spinner'.format(opdir, self.backup_dir)) #Backup spinner track
os.system('cp {}/{}/spinner/img_spinner_track.png /data/{}/selfdrive/assets'.format(BACKUPS_DIR, self.selected_backup, opdir)) #Replace spinner
# Check if backup has a spinner.c
                elif path.exists('{}/{}/spinner/spinner.c'.format(BACKUPS_DIR, self.selected_backup)): #Backup does haz
os.system('mv /data/{}/selfdrive/common/spinner.c {}/spinner'.format(opdir, self.backup_dir)) #Backup spinner.c
os.system('cp {}/{}/spinner/spinner.c /data/{}/selfdrive/common'.format(BACKUPS_DIR, self.selected_backup, opdir)) #Replace spinner.c
#Final make new spinner & finish
os.system('cd /data/{}/selfdrive/ui/spinner && make'.format(opdir))
print('\n{} spinner re-installed successfully! Original backed up to {}'.format(opdir, self.backup_dir))
print('Press enter to continue!')
mark_self_installed() # Create flag in /sdcard so auto installer knows there is a self installation
input()
#elif selected_option == 'OpenPilot UI':
# print('Additional Resources are not an active feature')
# time.sleep(5)
elif selected_option == '-Main Menu-' or selected_option is None:
return
elif selected_option == '-Reboot-':
print('Rebooting.... Enjoy your old theme!!!')
os.system('am start -a android.intent.action.REBOOT') #create an android action to reboot
exit()
elif selected_option == '-Quit-':
print('Thank you come again! You will see your changes next reboot!')
exit()
def restore_default_comma(self):
print('Selected to restore Comma-Default theme. Continue?')
print('Process is fully automagic!')
if not is_affirmative():
print('Not restoring...')
time.sleep(1.5)
            return
os.system('cp {} {}'.format(BOOT_LOGO_PATH, self.backup_dir)) # Make Backup
os.system('dd if=./{}/{}/{} of={}'.format(CONTRIB_THEMES, self.selected_backup, BOOT_LOGO_THEME_PATH, BOOT_LOGO_PATH)) # Replace
print('Factory Boot Logo restored successfully! Custom file(s) backed up to {}\n'.format(self.backup_dir))
os.system('mount -o remount,rw /system') # /system read only, must mount as r/w
os.system('mv /system/media/bootanimation.zip {}'.format(self.backup_dir)) # backup
os.system('cp ./{}/{}/bootanimation.zip /system/media/bootanimation.zip'.format(CONTRIB_THEMES, self.selected_backup,)) # replace
os.system('chmod 666 /system/media/bootanimation.zip')
print('Factory Boot Animation restored successfully! Custom file(s) backed up to {}\n'.format(self.backup_dir))
print('Thank you come again!')
exit()
if __name__ == '__main__':
bi = ThemeRestorer()
|
py | b40e44d016a89b3bbdefe330f33af22c33d5ad39 | import re
import pyblish.api
import openpype.api
from openpype.hosts.photoshop import api as photoshop
class ValidateNamingRepair(pyblish.api.Action):
"""Repair the instance asset."""
label = "Repair"
icon = "wrench"
on = "failed"
def process(self, context, plugin):
# Get the errored instances
failed = []
for result in context.data["results"]:
if (result["error"] is not None and result["instance"] is not None
and result["instance"] not in failed):
failed.append(result["instance"])
invalid_chars, replace_char = plugin.get_replace_chars()
self.log.info("{} --- {}".format(invalid_chars, replace_char))
# Apply pyblish.logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(failed, plugin)
stub = photoshop.stub()
for instance in instances:
self.log.info("validate_naming instance {}".format(instance))
metadata = stub.read(instance[0])
self.log.info("metadata instance {}".format(metadata))
layer_name = None
if metadata.get("uuid"):
layer_data = stub.get_layer(metadata["uuid"])
self.log.info("layer_data {}".format(layer_data))
if layer_data:
layer_name = re.sub(invalid_chars,
replace_char,
layer_data.name)
stub.rename_layer(instance.data["uuid"], layer_name)
subset_name = re.sub(invalid_chars, replace_char,
instance.data["name"])
instance[0].Name = layer_name or subset_name
metadata["subset"] = subset_name
stub.imprint(instance[0], metadata)
return True
class ValidateNaming(pyblish.api.InstancePlugin):
"""Validate the instance name.
    Spaces in names are not allowed. They will be replaced with underscores.
"""
label = "Validate Naming"
hosts = ["photoshop"]
order = openpype.api.ValidateContentsOrder
families = ["image"]
actions = [ValidateNamingRepair]
# configured by Settings
invalid_chars = ''
replace_char = ''
def process(self, instance):
help_msg = ' Use Repair action (A) in Pyblish to fix it.'
msg = "Name \"{}\" is not allowed.{}".format(instance.data["name"],
help_msg)
assert not re.search(self.invalid_chars, instance.data["name"]), msg
msg = "Subset \"{}\" is not allowed.{}".format(instance.data["subset"],
help_msg)
assert not re.search(self.invalid_chars, instance.data["subset"]), msg
@classmethod
def get_replace_chars(cls):
"""Pass values configured in Settings for Repair."""
return cls.invalid_chars, cls.replace_char
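# Illustration (added; not part of the plugin): with, for example, invalid_chars=r"[ ]"
# and replace_char="_" configured in Settings, the Repair action above would rename a
# layer called "my layer 01" to "my_layer_01" and update the subset name the same way.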
|
py | b40e46519fb99ec27ceb7d3da780fa9a0f87af32 | import colorama
import os, sys
from . import config
def clear():
if sys.platform == "win32":
os.system("cls")
else:
os.system("clear")
def resize(columns, rows):
if sys.platform == "win32":
os.system(f"mode con cols={columns} lines={rows}")
else:
os.system(f"echo '\033[8;{rows};{columns}t'")
def print_banner():
copyright = f"( Flight v{config.VERSION} )"
banner = f"""███████╗██╗ ██╗ ██████╗ ██╗ ██╗████████╗
██╔════╝██║ ██║██╔════╝ ██║ ██║╚══██╔══╝
█████╗ ██║ ██║██║ ███╗███████║ ██║
██╔══╝ ██║ ██║██║ ██║██╔══██║ ██║
██║ ███████╗██║╚██████╔╝██║ ██║ ██║
╚═╝ ╚══════╝╚═╝ ╚═════╝ ╚═╝ ╚═╝ ╚═╝
"""
print(f"{colorama.Fore.LIGHTRED_EX}{colorama.Style.BRIGHT}")
for line in banner.splitlines():
print(f"{line}".center(os.get_terminal_size().columns))
print()
print(f"————————————————————————————————————( Flight v{config.VERSION} )—————————————————————————————————————")
print(f"{colorama.Style.RESET_ALL}")
def print_color(color, text):
print(color + text + colorama.Style.RESET_ALL)
def print_cmd(text):
print(f"{colorama.Fore.LIGHTBLUE_EX}{colorama.Style.BRIGHT}[COMMAND]{colorama.Style.RESET_ALL} {text}")
def print_info(text):
print(f"{colorama.Fore.LIGHTGREEN_EX}{colorama.Style.BRIGHT}[INFO]{colorama.Style.RESET_ALL} {text}")
def print_error(text):
print(f"{colorama.Fore.LIGHTRED_EX}{colorama.Style.BRIGHT}[ERROR]{colorama.Style.RESET_ALL} {text}")
def print_warning(text):
print(f"{colorama.Fore.LIGHTYELLOW_EX}{colorama.Style.BRIGHT}[WARNING]{colorama.Style.RESET_ALL} {text}")
def print_cli(text):
print(f"{colorama.Fore.LIGHTMAGENTA_EX}{colorama.Style.BRIGHT}[CLI]{colorama.Style.RESET_ALL} {text}") |
py | b40e47681957eca54dbd00a0e62256d40e286a2b | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'Christian Heider Nielsen'
__doc__ = r'''
Created on 03-12-2020
'''
import torch
import torchaudio
from torchaudio.datasets import COMMONVOICE
class CommonVoice(COMMONVOICE):
def __init__(self, root: str):
super().__init__(root, download=False)
common_voice_data = torchaudio.datasets.COMMONVOICE('.', download=True)
data_loader = torch.utils.data.DataLoader(common_voice_data,
                                          batch_size=1,
                                          shuffle=True,
                                          num_workers=0)  # `args` was undefined here; default to 0 loader workers
|
py | b40e481778b7f9879bed73073a6cd54a8a0e260f | # encoding=utf8
# This is temporary fix to import module from parent folder
# It will be removed when package is published on PyPI
import sys
sys.path.append('../')
# End of fix
from niapy.algorithms.modified import SelfAdaptiveDifferentialEvolution
from niapy.task import Task
from niapy.problems import Griewank
# we will run jDE algorithm for 5 independent runs
algo = SelfAdaptiveDifferentialEvolution(f_lower=0.0, f_upper=2.0, tao1=0.9, tao2=0.45, population_size=40,
differential_weight=0.5, crossover_probability=0.5)
for i in range(5):
task = Task(problem=Griewank(dimension=10, lower=-600, upper=600), max_evals=10000, enable_logging=True)
best = algo.run(task)
print('%s -> %s' % (best[0], best[1]))
print(algo.get_parameters())
|
py | b40e4847727edeab421454b3b468733d7b59f3ba | from unittest.mock import MagicMock
import pytest
from riotwatcher._apis.valorant import MatchApi
@pytest.fixture(params=["12345"])
def match_id(request):
return request.param
@pytest.fixture(params=["queue420"])
def queue(request):
return request.param
@pytest.mark.unit
@pytest.mark.val
class TestMatchApi:
def test_by_id(self, region, match_id):
mock_base_api = MagicMock()
expected_return = object()
mock_base_api.raw_request.return_value = expected_return
match = MatchApi(mock_base_api)
ret = match.by_id(region, match_id)
mock_base_api.raw_request.assert_called_once_with(
MatchApi.__name__,
match.by_id.__name__,
region,
f"https://{region}.api.riotgames.com/val/match/v1/matches/{match_id}",
{},
)
assert ret == expected_return
def test_matchlist_by_puuid(self, region, puuid):
mock_base_api = MagicMock()
expected_return = object()
mock_base_api.raw_request.return_value = expected_return
match = MatchApi(mock_base_api)
ret = match.matchlist_by_puuid(region, puuid)
mock_base_api.raw_request.assert_called_once_with(
MatchApi.__name__,
match.matchlist_by_puuid.__name__,
region,
f"https://{region}.api.riotgames.com/val/match/v1/matchlists/by-puuid/{puuid}",
{},
)
assert ret == expected_return
def test_recent_matches(self, region, queue):
mock_base_api = MagicMock()
expected_return = object()
mock_base_api.raw_request.return_value = expected_return
match = MatchApi(mock_base_api)
ret = match.recent_matches(region, queue)
mock_base_api.raw_request.assert_called_once_with(
MatchApi.__name__,
match.recent_matches.__name__,
region,
f"https://{region}.api.riotgames.com/val/match/v1/recent-matches/by-queue/{queue}",
{},
)
assert ret == expected_return
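# Note (added for clarity): the `region` and `puuid` fixtures used above are not defined
# in this file; they are assumed to come from a shared conftest.py, in the same way the
# `match_id` and `queue` fixtures are defined at the top of this module.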
|
py | b40e488e044e462b7706bdbb6aa6589c8226ec1f | from datetime import datetime
from src.product import ProductBundleDiscount
class Product(object):
def __init__(self, name, sku, price, cost, stock_quantity, unit, bundle_discount=None):
"""
The product will contain it's name,
price, cost, sku, and price history.
>>> sweet_potato = Product(name='sweet_potato', sku='001', price=1.00, cost=0.50, stock_quantity=100, unit='Kg')
>>> sweet_potato.name = 'sweet_potato'
>>> sweet_potato.price = 1.00
>>> sweet_potato.cost = 0.50
>>> sweet_potato.sku = '001'
>>> sweet_potato.stock_quantity = 100
>>> sweet_potato.unit = 'Kg'
"""
self._validate_values(name, sku, price, cost)
self.name: str = name
self.sku: str = sku
self.price: float = float(price)
self.cost: float = float(cost)
        self.price_history: List[Tuple[float, datetime]] = self._create_price_history()
self.stock_quantity: int = stock_quantity
self.unit: str = unit
self.bundle_discount: ProductBundleDiscount = bundle_discount
@staticmethod
def _validate_values(name: str, sku: str, price: float, cost: float):
"""
Validates the class initializer parameters
:param name: name of the product
:param sku: stock keeping unit code
:param price: price that the consumer will pay for the product
:param cost: cost that the owner paid for the product
:raises TypeError:
"""
if not name or not isinstance(name, str):
raise TypeError('A correct name must be provided')
if not sku or not isinstance(sku, str):
raise TypeError('SKU must be provided')
if not price or not isinstance(price, (float, int)):
raise TypeError('Price must be provided')
if not cost or not isinstance(cost, (float, int)):
raise TypeError('Cost must be provided')
def _create_price_history(self):
now = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
return [(self.price, now)]
def __str__(self) -> str:
return self.name
|
py | b40e48fb28d63900bbf983af459880db1a878bc0 | def test_cli_template():
assert 1==1
|
py | b40e4964ccb60424a1ff0e3f967de7c47993bc53 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('league', '0012_auto_20150912_1135'),
]
operations = [
migrations.AddField(
model_name='match',
name='league',
field=models.ForeignKey(to='league.MeetupLeague', default=2, related_name='match_league'),
preserve_default=False,
),
]
|
py | b40e49f8f375ca036cd9de61535b29eabea1a41a | # Author: Stan Fortoński
# Date: 02.05.2020
# Tinder Bot
import sys
from time import sleep
from random import randrange
from tinder.config import Config
import tinder.functions as fn
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import ElementClickInterceptedException
class TinderBot:
def __init__(self, driver):
self.driver = driver
self.__totalLikes = 0
self.__totalDislikes = 0
def perform(self, wait=True):
if 'app/recs' in self.driver.current_url:
try:
self.__doOutOfLikesPopup()
fn.waitForPeople(self.driver)
chanceToLike = randrange(1, 100)
if chanceToLike <= Config['chance_to_like']:
self.like()
else:
self.dislike()
if wait:
fn.waitRandomTime()
      except Exception:  # a bare except here would also swallow the SystemExit raised in __doOutOfLikesPopup
self.driver.get('https://tinder.com/app/recs')
fn.waitRandomTime()
def __doOutOfLikesPopup(self):
driver = self.driver
try:
driver.find_element_by_xpath('/html/body/div[2]/div/div/div[1]/div[2]/div[1]/div/div[1]/div/div/span/div/div/div[1]/div')
print('Sorry, you do not have any likes for now. Try later.')
sys.exit()
except NoSuchElementException:
pass
def like(self):
success = None
likesButtons = ['/html/body/div[1]/div/div[1]/div/main/div[1]/div/div/div[1]/div[1]/div/div[4]/div/div[4]/button', '/html/body/div[1]/div/div[1]/div/main/div[1]/div/div/div[1]/div[1]/div/div[5]/div/div[4]/button']
for button in likesButtons:
try:
self.driver.find_element_by_xpath(button).click()
self.__totalLikes += 1
success = True
break
except (ElementClickInterceptedException, NoSuchElementException):
continue
if not success:
self.solveProblems()
sleep(2)
def dislike(self):
success = None
dislikesButtons = ['/html/body/div[1]/div/div[1]/div/main/div[1]/div/div/div[1]/div[1]/div/div[4]/div/div[2]/button', '/html/body/div[1]/div/div[1]/div/main/div[1]/div/div/div[1]/div[1]/div/div[5]/div/div[2]/button']
for button in dislikesButtons:
try:
self.driver.find_element_by_xpath(button).click()
self.__totalDislikes += 1
success = True
break
except (ElementClickInterceptedException, NoSuchElementException):
continue
if not success:
self.solveProblems()
sleep(2)
def solveProblems(self):
try:
self.driver.find_element_by_xpath('/html/body/div[2]/div/div/div/div[3]/button[2]').click()
except (ElementClickInterceptedException, NoSuchElementException):
pass
try:
self.driver.find_element_by_xpath('/html/body/div[2]/div/div/button[2]').click()
except (ElementClickInterceptedException, NoSuchElementException):
pass
try:
self.driver.find_element_by_xpath('/html/body/div[1]/div/div[1]/div/main/div[2]/div/div/div[1]/div/div[4]/button').click()
except (ElementClickInterceptedException, NoSuchElementException):
pass
def __str__(self):
total = self.getTotalActions()
return f'=== Tinder results ===\n* Total actions: {total}\n* Total likes: {self.__totalLikes}\n* Total disLikes: {self.__totalDislikes}'
def getTotalActions(self):
return self.__totalLikes + self.__totalDislikes
def getTotalLikes(self):
return self.__totalLikes
def getTotalDislikes(self):
return self.__totalDislikes |
py | b40e4a5654e9cd27ea313d39f7b84e0b25233149 | #!/usr/bin/env python3
#encoding: utf8
import sys,rospy,math
from pimouse_ros.msg import MotorFreqs
from geometry_msgs.msg import Twist
from std_srvs.srv import Trigger,TriggerResponse
from pimouse_ros.srv import TimedMotion
class Motor():
def __init__(self):
if not self.set_power(False): sys.exit(1)
rospy.on_shutdown(self.set_power)
self.sub_raw = rospy.Subscriber("motor_raw",MotorFreqs,self.callback_raw_freq)
self.sub_cmd_vel = rospy.Subscriber("cmd_vel",Twist,self.callback_cmd_vel)
self.srv_on = rospy.Service("motor_on",Trigger,self.callback_on)
self.srv_off = rospy.Service("motor_off",Trigger,self.callback_off)
self.srv_tm = rospy.Service("timed_motion",TimedMotion,self.callback_tm)
self.last_time = rospy.Time.now()
self.using_cmd_vel = False
def set_power(self,onoff=False):
en = "/dev/rtmotoren0"
try:
with open(en,"w") as f:
f.write("1\n" if onoff else "0\n")
self.is_on = onoff
return True
except:
rospy.logger("cannot write to" + en)
return False
def set_raw_freq(self,left_hz,right_hz):
if not self.is_on:
rospy.logerr("not enpowerd")
return
try:
with open("/dev/rtmotor_raw_l0","w") as lf ,open("/dev/rtmotor_raw_r0","w") as rf:
lf.write(str(int(round(left_hz)))+"\n")
rf.write(str(int(round(right_hz)))+"\n")
except:
rospy.logeer("cannot write to rtmotor_raw_*")
def callback_raw_freq(self,message):
self.set_raw_freq(message.left_hz,message.right_hz)
def callback_cmd_vel(self,message):
forward_hz = 80000.0*message.linear.x/(9*math.pi)
rot_hz = 400.0*message.angular.z/math.pi
self.set_raw_freq(forward_hz - rot_hz, forward_hz + rot_hz)
self.using_cmd_vel = True
self.last_time = rospy.Time.now()
def onoff_response(self,onoff):
d = TriggerResponse()
d.success = self.set_power(onoff)
d.message = "ON" if self.is_on else "OFF"
return d
def callback_on(self,message):
return self.onoff_response(True)
def callback_off(self,message):
return self.onoff_response(False)
def callback_tm(self,message):
if not self.is_on:
rospy.logerr("not enpowed")
return False
dev = "/dev/rtmotor0"
try:
with open(dev,"w") as f:
f.write("%d %d %d\n" % (message.left_hz,message.right_hz,message.duration_ms))
except:
rospy.logerr("cannot write to"+dev)
return False
return True
if __name__ == "__main__":
rospy.init_node("motors")
m = Motor()
rate = rospy.Rate(10)
while not rospy.is_shutdown():
        if m.using_cmd_vel and rospy.Time.now().to_sec() - m.last_time.to_sec() >= 1.0:
m.set_raw_freq(0,0)
m.using_cmd_vel = False
rate.sleep()
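# Usage note (added for illustration; not part of the original node). With this node
# running, the motors can be exercised from another shell, for example:
#   rosservice call /motor_on "{}"
#   rostopic pub -1 /cmd_vel geometry_msgs/Twist '{linear: {x: 0.1}, angular: {z: 0.0}}'
#   rosservice call /timed_motion "{left_hz: 400, right_hz: 400, duration_ms: 1000}"
#   rosservice call /motor_off "{}"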
|
py | b40e4b41fde65f49ddeaafa2981e8141ef787fa7 | #!/usr/bin/env python
import rospy
from std_msgs.msg import Bool
from dbw_mkz_msgs.msg import ThrottleCmd, SteeringCmd, BrakeCmd, SteeringReport
from geometry_msgs.msg import TwistStamped
import math
from twist_controller import Controller
'''
You can build this node only after you have built (or partially built) the `waypoint_updater` node.
You will subscribe to `/twist_cmd` message which provides the proposed linear and angular velocities.
You can subscribe to any other message that you find important or refer to the document for list
of messages subscribed to by the reference implementation of this node.
One thing to keep in mind while building this node and the `twist_controller` class is the status
of `dbw_enabled`. While in the simulator it's enabled all the time; in the real car, that will
not be the case. This may cause your PID controller to accumulate error because the car could
temporarily be driven by a human instead of your controller.
We have provided two launch files with this node. Vehicle specific values (like vehicle_mass,
wheel_base) etc should not be altered in these files.
We have also provided some reference implementations for PID controller and other utility classes.
You are free to use them or build your own.
Once you have the proposed throttle, brake, and steer values, publish it on the various publishers
that we have created in the `__init__` function.
'''
class DBWNode(object):
def __init__(self):
rospy.init_node('dbw_node')
vehicle_mass = rospy.get_param('~vehicle_mass', 1736.35)
fuel_capacity = rospy.get_param('~fuel_capacity', 13.5)
brake_deadband = rospy.get_param('~brake_deadband', .1)
decel_limit = rospy.get_param('~decel_limit', -5)
accel_limit = rospy.get_param('~accel_limit', 1.)
wheel_radius = rospy.get_param('~wheel_radius', 0.2413)
wheel_base = rospy.get_param('~wheel_base', 2.8498)
steer_ratio = rospy.get_param('~steer_ratio', 14.8)
max_lat_accel = rospy.get_param('~max_lat_accel', 3.)
max_steer_angle = rospy.get_param('~max_steer_angle', 8.)
self.steer_pub = rospy.Publisher('/vehicle/steering_cmd',
SteeringCmd, queue_size=1)
self.throttle_pub = rospy.Publisher('/vehicle/throttle_cmd',
ThrottleCmd, queue_size=1)
self.brake_pub = rospy.Publisher('/vehicle/brake_cmd',
BrakeCmd, queue_size=1)
self.controller = Controller(vehicle_mass=vehicle_mass,
fuel_capacity=fuel_capacity,
brake_deadband=brake_deadband,
decel_limit=decel_limit,
accel_limit=accel_limit,
wheel_radius=wheel_radius,
wheel_base=wheel_base,
steer_ratio=steer_ratio,
max_lat_accel=max_lat_accel,
max_steer_angle=max_steer_angle)
rospy.Subscriber('/vehicle/dbw_enabled', Bool,
self.dbw_enabled_cb, queue_size=1)
rospy.Subscriber('/twist_cmd', TwistStamped,
self.twist_cb, queue_size=2)
rospy.Subscriber('/current_velocity', TwistStamped,
self.velocity_cb, queue_size=5)
self.current_vel = None
self.curr_ang_vel = None
self.dbw_enabled = None
self.linear_vel = None
self.angular_vel = None
self.throttle = self.steering = self.brake = 0
self.loop()
def loop(self):
rate = rospy.Rate(50) # 50Hz
while not rospy.is_shutdown():
if not None in (self.current_vel, self.linear_vel, self.angular_vel):
self.throttle, self.brake, self.steering = self.controller.control(
self.current_vel,
self.dbw_enabled,
self.linear_vel,
self.angular_vel)
if self.dbw_enabled:
self.publish(self.throttle, self.brake, self.steering)
rate.sleep()
def dbw_enabled_cb(self, msg):
self.dbw_enabled = msg
def twist_cb(self, msg):
self.linear_vel = msg.twist.linear.x
self.angular_vel = msg.twist.angular.z
def velocity_cb(self, msg):
self.current_vel = msg.twist.linear.x
def publish(self, throttle, brake, steer):
tcmd = ThrottleCmd()
tcmd.enable = True
tcmd.pedal_cmd_type = ThrottleCmd.CMD_PERCENT
tcmd.pedal_cmd = throttle
self.throttle_pub.publish(tcmd)
scmd = SteeringCmd()
scmd.enable = True
scmd.steering_wheel_angle_cmd = steer
self.steer_pub.publish(scmd)
bcmd = BrakeCmd()
bcmd.enable = True
bcmd.pedal_cmd_type = BrakeCmd.CMD_TORQUE
bcmd.pedal_cmd = brake
self.brake_pub.publish(bcmd)
if __name__ == '__main__':
DBWNode()
|
py | b40e4b484cd8418bdea7e63856e4f89b67e7fc54 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
DEPS = [
'recipe_engine/raw_io',
'repo',
'recipe_engine/step',
]
REPO_LIST_OUTPUT = """\
src/foo : foo
src/bar : bar
badline
"""
def RunSteps(api):
api.repo.init('http://manifest_url')
api.repo.init('http://manifest_url/manifest', '-b', 'branch')
api.repo.reset()
api.repo.clean()
api.repo.clean('-x')
api.repo.sync()
repos = api.repo.list()
assert repos == [('src/foo', 'foo'), ('src/bar', 'bar')]
api.step('repo list echo', ['echo', str(repos)])
def GenTests(api):
yield (api.test('setup_repo') +
api.step_data('repo list',
api.raw_io.stream_output(REPO_LIST_OUTPUT)))
|
py | b40e4bf6d2da3c5ae69ed2432daf88985cd65fde | # -*- coding: utf-8 -*-
#
# Copyright 2020-2022 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Resources management functions
"""
import sys
import bigml.api
from bigmler.utils import (dated, get_url, log_message, check_resource,
check_resource_error, log_created_resources)
from bigmler.reports import report
from bigmler.resourcesapi.common import set_basic_batch_args, map_fields, \
update_json_args
from bigmler.resourcesapi.common import FULL_FORMAT
def set_batch_topic_distribution_args( \
args, fields=None, dataset_fields=None):
"""Return batch topic distribution args dict
"""
batch_topic_distribution_args = set_basic_batch_args(args, args.name)
if args.fields_map_ and fields is not None:
if dataset_fields is None:
dataset_fields = fields
batch_topic_distribution_args.update({
"fields_map": map_fields(args.fields_map_,
fields, dataset_fields)})
if args.prediction_info == FULL_FORMAT:
batch_topic_distribution_args.update(all_fields=True)
if args.prediction_fields:
batch_topic_distribution_args.update(all_fields=False)
prediction_fields = []
for field in args.prediction_fields.split(args.args_separator):
field = field.strip()
if not field in dataset_fields.fields:
try:
field = dataset_fields.field_id(field)
except Exception as exc:
sys.exit(exc)
prediction_fields.append(field)
batch_topic_distribution_args.update(output_fields=prediction_fields)
if 'batch_topic_distribution' in args.json_args:
update_json_args(
batch_topic_distribution_args, args.json_args.get( \
'batch_topic_distribution'), fields)
return batch_topic_distribution_args
def create_batch_topic_distribution(topic_model, test_dataset,
batch_topic_distribution_args, args,
api=None, session_file=None,
path=None, log=None):
"""Creates remote batch topic distribution
"""
if api is None:
api = bigml.api.BigML()
message = dated("Creating batch topic distribution.\n")
log_message(message, log_file=session_file, console=args.verbosity)
batch_topic_distribution = api.create_batch_topic_distribution( \
topic_model, test_dataset, batch_topic_distribution_args, retries=None)
log_created_resources( \
"batch_topic_distribution", path,
bigml.api.get_batch_topic_distribution_id(batch_topic_distribution),
mode='a')
batch_topic_distribution_id = check_resource_error(
batch_topic_distribution,
"Failed to create batch topic distribution: ")
try:
batch_topic_distribution = check_resource( \
batch_topic_distribution, api.get_batch_topic_distribution,
raise_on_error=True)
except Exception as exception:
sys.exit("Failed to get a finished batch topic distribution: %s"
% str(exception))
message = dated("Batch topic distribution created: %s\n"
% get_url(batch_topic_distribution))
log_message(message, log_file=session_file, console=args.verbosity)
log_message("%s\n" % batch_topic_distribution_id, log_file=log)
if args.reports:
report(args.reports, path, batch_topic_distribution)
return batch_topic_distribution
|
py | b40e4c738bfdb5650c59f35923da5df9157499ca | # Optimization tools
import tensorflow as tf
import numpy as np
import tensorflow.keras.backend as K
from tensorflow.keras.callbacks import TensorBoard, Callback
class MyTensorboard(TensorBoard):
""" Tensorboard callback to store the learning rate at the end of each epoch.
"""
def __init__(self, **kwargs):
kwargs['histogram_freq'] = 0
kwargs['write_graph'] = False
kwargs['write_grads'] = False
kwargs['write_images'] = False
kwargs['embeddings_freq'] = 0
#kwargs['update_freq'] = 'epoch'
super(MyTensorboard, self).__init__(**kwargs)
def on_epoch_end(self, epoch, logs=None):
lr = K.eval(self.model.optimizer.lr)
lr_summary = tf.Summary(
value=[tf.Summary.Value(tag='lr', simple_value=lr)])
self.writer.add_summary(lr_summary, epoch)
self.writer.flush()
super(MyTensorboard, self).on_epoch_end(epoch, logs)
class MyLRScheduler(Callback):
def __init__(self, schedule_type = 'constant', decay = 0, step = 1, step_epochs = 0, max_epochs = 100, lr_start = 0, lr_end = 0, verbose=0):
super(MyLRScheduler, self).__init__()
self.schedule_type = schedule_type
self.decay = float(decay)
self.step = step
self.max_epochs = max_epochs
if step_epochs == 0:
self.step_epochs = np.arange(self.step, self.max_epochs, self.step)
else:
self.step_epochs = list(step_epochs)
self.lr_start = float(lr_start)
self.lr_end = float(lr_end)
self.verbose = verbose
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, 'lr'):
raise ValueError('Optimizer must have a "lr" attribute.')
lr = self.schedule(epoch)
if not isinstance(lr, (float, np.float32, np.float64)):
raise ValueError('The output of the "schedule" function '
'should be float.')
K.set_value(self.model.optimizer.lr, lr)
if self.verbose > 0:
print('\nEpoch %05d: LearningRateScheduler reducing learning '
'rate to %s.' % (epoch + 1, lr))
def schedule(self, epoch):
""" Defines the learning rate schedule. This is called at the begin of each epoch through the LearningRateScheduler callback.
Arguments:
epoch -- integer, current epoch, [0, #epochs-1]
Returns:
rate -- calculated learning rate
"""
if self.schedule_type == 'constant':
rate = self.lr_start
elif self.schedule_type == 'decay' or self.schedule_type == 'step':
i = np.searchsorted(self.step_epochs, epoch, side='right')
rate = self.lr_start * (self.decay ** i)
# elif self.schedule_type == 'step':
# rate = self.lr_start * (self.decay ** np.floor(epoch / self.step))
elif self.schedule_type == 'anneal':
rate = self.lr_start / (1 + self.decay * epoch)
elif self.schedule_type == 'clr_triangular':
e = epoch + self.step
c = np.floor(1 + e / (2 * self.step))
x = np.abs(e / self.step - 2 * c + 1)
rate = self.lr_end + (self.lr_start - self.lr_end) * \
np.maximum(0, (1 - x)) * float(self.decay**(c - 1))
elif self.schedule_type == 'clr_restarts':
c = np.floor(epoch / self.step)
x = 1 + np.cos((epoch % self.step) / self.step * np.pi)
rate = self.lr_end + 0.5 * (self.lr_start - self.lr_end) * x * self.decay**c
elif self.schedule_type == 'warmup':
# rate = self.lr_start * np.min(np.pow(epoch, -0.5), epoch / self.step)
if epoch <= self.step:
rate = self.lr_start * epoch / self.step
else:
rate = self.lr_start * (self.decay ** (epoch - self.step))
else:
raise ValueError('Not supported learning schedule.')
return float(rate)
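# Illustrative check (added; not part of the original module): schedule() only reads the
# callback's own fields, so the learning-rate curves can be printed without compiling a
# model. The parameter values below are arbitrary examples.
if __name__ == '__main__':
    warmup = MyLRScheduler(schedule_type='warmup', decay=0.95, step=5, lr_start=1e-3)
    stepped = MyLRScheduler(schedule_type='step', decay=0.5, step=10, lr_start=1e-2)
    for epoch in range(0, 50, 5):
        print(epoch, warmup.schedule(epoch), stepped.schedule(epoch))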
|
py | b40e4ce28431743162d5d76a3098b2b64eb02dfe | """Import related views."""
import csv
import io
from reversion import revisions as reversion
from django.contrib.auth.decorators import (
login_required, permission_required, user_passes_test
)
from django.db import transaction
from django.shortcuts import render
from django.urls import reverse
from django.utils.encoding import smart_text
from django.utils.translation import ugettext as _
from modoboa.lib.exceptions import Conflict, ModoboaException
from .. import signals
from ..forms import ImportDataForm, ImportIdentitiesForm
@reversion.create_revision()
def importdata(request, formclass=ImportDataForm):
"""Generic import function
As the process of importing data from a CSV file is the same
whatever the type, we do a maximum of the work here.
:param request: a ``Request`` instance
:param typ: a string indicating the object type being imported
:return: a ``Response`` instance
"""
error = None
form = formclass(request.POST, request.FILES)
if form.is_valid():
try:
infile = io.TextIOWrapper(
request.FILES["sourcefile"].file, encoding="utf8")
reader = csv.reader(infile, delimiter=form.cleaned_data["sepchar"])
except csv.Error as inst:
error = smart_text(inst)
else:
try:
cpt = 0
for row in reader:
if not row:
continue
fct = signals.import_object.send(
sender="importdata", objtype=row[0].strip())
fct = [func for x_, func in fct if func is not None]
if not fct:
continue
fct = fct[0]
with transaction.atomic():
try:
fct(request.user, row, form.cleaned_data)
except Conflict:
if form.cleaned_data["continue_if_exists"]:
continue
raise Conflict(
_("Object already exists: %s"
% form.cleaned_data["sepchar"].join(row[:2]))
)
cpt += 1
msg = _("%d objects imported successfully" % cpt)
return render(request, "admin/import_done.html", {
"status": "ok", "msg": msg
})
except (ModoboaException) as e:
error = str(e)
return render(request, "admin/import_done.html", {
"status": "ko", "msg": error
})
@login_required
@permission_required("admin.add_domain")
def import_domains(request):
if request.method == "POST":
return importdata(request)
ctx = {
"title": _("Import domains"),
"action_label": _("Import"),
"action_classes": "submit",
"action": reverse("admin:domain_import"),
"formid": "importform",
"enctype": "multipart/form-data",
"target": "import_target",
"form": ImportDataForm(),
}
return render(request, "admin/import_domains_form.html", ctx)
@login_required
@user_passes_test(
lambda u: u.has_perm("core.add_user") or
u.has_perm("admin.add_alias")
)
def import_identities(request):
if request.method == "POST":
return importdata(request, ImportIdentitiesForm)
ctx = {
"title": _("Import identities"),
"action_label": _("Import"),
"action_classes": "submit",
"action": reverse("admin:identity_import"),
"formid": "importform",
"enctype": "multipart/form-data",
"target": "import_target",
"form": ImportIdentitiesForm()
}
return render(request, "admin/import_identities_form.html", ctx)
|
py | b40e4d0b039e710c03d5c5f75536f841525a2d29 | from __future__ import annotations
import inspect
import tokenize
from textwrap import dedent
from typing import Callable, List
TokensType = List[tokenize.TokenInfo]
processors = []
def processor(func: Callable[[TokensType], TokensType]) -> Callable[[TokensType], TokensType]:
processors.append(func)
return func
def get_validator_source(validator) -> str:
# get source code
if not hasattr(validator, '__code__'):
return ''
try:
lines, _ = inspect.getsourcelines(validator.__code__)
except OSError:
return ''
lines = dedent('\n'.join(lines)).split('\n')
# tokenize
tokens: TokensType
try:
tokens = _get_tokens(lines)
except tokenize.TokenError:
lines = _clear_lines(lines)
tokens = _get_tokens(lines)
# drop junk
for processor in processors:
tokens = processor(tokens)
# transform back to text
lines = tokenize.untokenize(tokens).split('\n')
lines = _clear_lines(lines)
if len(lines) > 1:
return ''
return ' '.join(lines).replace('_.', '').lstrip()
def _clear_lines(lines: list[str]) -> list[str]:
lines = [line.rstrip() for line in lines]
lines = [line for line in lines if line]
# drop trailing comma
if lines[-1] and lines[-1][-1] == ',':
lines[-1] = lines[-1][:-1]
return lines
def _get_tokens(lines: list[str]) -> list[tokenize.TokenInfo]:
tokens = tokenize.generate_tokens(iter(lines).__next__)
exclude = {tokenize.INDENT, tokenize.DEDENT, tokenize.ENDMARKER}
return [token for token in tokens if token.type not in exclude]
@processor
def _extract_def_name(tokens: TokensType) -> TokensType:
for token, next_token in zip(tokens, tokens[1:]):
if token.string == 'lambda':
return tokens
if token.string == '@':
return tokens
if token.string == 'def':
return [next_token]
if token.string == 'class':
return [next_token]
return tokens
@processor
def _drop_comments(tokens: TokensType) -> TokensType:
return [token for token in tokens if token.type != tokenize.COMMENT]
@processor
def _extract_decorator_args(tokens: TokensType) -> TokensType:
if not tokens:
return tokens
# drop decorator symbol
if tokens[0].string == '@':
tokens = tokens[1:]
# proceed only if is call of a deal decorator
if tokens[0].string != 'deal' or tokens[1].string != '.':
return tokens
# find where decorator starts
start = 0
for index, token in enumerate(tokens):
if token.string == '(':
start = index
break
else:
return tokens
start += 1
end = 0
for index, token in enumerate(tokens):
if token.string == ')':
end = index
return tokens[start:end]
@processor
def _extract_assignment(tokens: TokensType) -> TokensType:
start = 0
for index, token in enumerate(tokens):
if token.type == tokenize.OP and '=' in token.string:
start = index
break
if token.type not in (tokenize.NAME, tokenize.DOT, tokenize.NEWLINE):
return tokens
else:
return tokens
start += 1
return tokens[start:]
@processor
def _extract_lambda(tokens: TokensType) -> TokensType:
start = 0
for index, (token1, token2) in enumerate(zip(tokens, tokens[1:])):
if token1.string != '(':
continue
if token2.string != 'lambda':
continue
start = index + 1
break
else:
return tokens
end = 0
for index, token in enumerate(tokens[start:], start=start):
if token.string == ')':
end = index
return tokens[start:end]
@processor
def _extract_lambda_body(tokens: TokensType) -> TokensType:
# find where lambda starts
start = 0
for index, token in enumerate(tokens):
if token.string == 'lambda':
start = index + 1
break
else:
return tokens
# find where lambda body starts
for index, token in enumerate(tokens[start:], start=start):
if token.type == tokenize.OP and ':' in token.string:
start = index
break
else:
return tokens
start += 1
return tokens[start:]
@processor
def _fix_line_numbers(tokens: TokensType) -> TokensType:
if not tokens:
return tokens
diff = tokens[0].start[0] - 1
new_tokens = []
for token in tokens:
token = token._replace(
start=(token.start[0] - diff, token.start[1]),
end=(token.end[0] - diff, token.end[1]),
)
new_tokens.append(token)
return new_tokens
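# Example (added for illustration; not part of the library code): for a lambda-based
# validator, the processors above strip the assignment and the lambda header, leaving
# only the bare condition expression.
if __name__ == '__main__':
    validator = lambda x: x > 0
    print(get_validator_source(validator))  # expected output: x > 0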
|
py | b40e4d117dd629c54f20ac9dc4a0fe9bd9cb7613 | import numpy
import time
import ctypes
from ..kdtree import kdtree
from . import expr
__all__ = [
'Points',
'points',
'zeros',
'ones',
'empty',
'zeros_like',
'ones_like',
'empty_like',
'rand',
'load'
]
class Final(type):
# copied from https://stackoverflow.com/questions/16056574
def __new__(cls, name, bases, classdict):
for b in bases:
if isinstance(b, Final):
raise TypeError("type " + b.__name__ +
" is not an acceptable base type")
return type.__new__(cls, name, bases, dict(classdict))
def _memaddr(obj):
return obj.ctypes.get_data()
def points(object, dtype=None, copy=True):
if not copy and isinstance(object, numpy.ndarray) \
and (dtype is None or dtype == object.dtype):
ret = object.view(Points)
else:
temp = numpy.array(object, dtype=dtype, copy=False)
ret = empty_like(temp)
ret[:] = temp
return ret
def zeros(shape, dtype=float):
ret = Points(shape=shape, dtype=dtype)
ret[:] = 0
return ret
def ones(shape, dtype=float):
ret = Points(shape=shape, dtype=dtype)
ret[:] = 1
return ret
def empty(shape, dtype=float):
return Points(shape=shape, dtype=dtype)
def zeros_like(a, dtype=None):
return zeros(a.shape, dtype=(a.dtype if dtype is None else dtype))
def ones_like(a, dtype=None):
    return ones(a.shape, dtype=(a.dtype if dtype is None else dtype))
def empty_like(a, dtype=None):
    return empty(a.shape, dtype=(a.dtype if dtype is None else dtype))
def rand(*dims):
ret = empty(shape=dims, dtype=float)
ret[:] = numpy.random.rand(*dims)
return ret
def load(file, **kwargs):
# wrapper around numpy.load
# TODO: this copies to numpy array, then to a Points object;
# find way to avoid this extra copy
return points(numpy.load(file, **kwargs))
class Points (numpy.ndarray):
_last_modified = dict()
# make Points non subclass-able to simplify write control
# TODO: are there any use cases for subclassing Points?
__metaclass__ = Final
def __new__(cls, *args, **kwargs):
return super(Points, cls).__new__(cls, *args, **kwargs)
def __array_finalize__(self, obj):
self._last_updated = None
self._tree = None
if obj is not None and not isinstance(obj, Points):
# arrived at here via view() of a non-Points object
raise TypeError('Detected attempt at creating Points-type '
'view on non-Points object.')
if obj is None and not self.flags.owndata:
raise TypeError('Detected attempt at creating Points-type '
'view on buffer object via __new__(buffer=...)')
if obj is None:
# arrived at here via __new__
self._memsize = self.size * self.dtype.itemsize
self._memloc = _memaddr(self)
elif _memaddr(self) < obj._memloc or \
_memaddr(self) >= obj._memloc + obj._memsize:
# arrived at here via copy()
self._memsize = self.size * self.dtype.itemsize
self._memloc = _memaddr(self)
else:
# arrived at here via slicing/indexing
# or view() of a Points object
self._memsize = obj._memsize
self._memloc = obj._memloc
# cannot set writeable flag to False here,
# because copy() performs assignment after __array_finalize__
def __init__(self, *args, **kwargs):
self.flags.writeable = False
def copy(self):
x = super(Points, self).copy()
x.flags.writeable = False
return x
def _record_modify_time(self):
Points._last_modified[self._memloc] = time.time()
def _update_kd_tree(self):
# if there is no recorded last modify time for self._memloc,
# then self has either not been modified yet since creation,
# or _last_modified dictionary has been cleared. Either way,
# the k-d tree needs updating; we set the last modify time to
# the current time to trigger this.
if Points._last_modified.get(self._memloc) is None:
Points._last_modified[self._memloc] = time.time()
# note: None < x, for any number x
build_time = None
if self._last_updated is None \
or self._last_updated <= Points._last_modified[self._memloc]:
# note: do not need to explicitly call __del__()
# as it is automatically called when overwritten
build_time = time.time()
self._tree = kdtree._build(self)
build_time = time.time() - build_time
self._last_updated = time.time() # record time *after* build
return build_time
def nbhds(self, queries=None, k=1, r=None, verbose=False):
self._update_kd_tree()
return kdtree._query(self._tree, queries=queries, k=k, dmax=r)
def NBHDS(self, queries=None, k=1, r=None, verbose=False):
return expr.nbhds_op(self, queries, k, r)
def _guard(self, f):
def f_guarded(*args, **kwargs):
if self.base is not None:
self.base.flags.writeable = True
self.flags.writeable = True
ret = None
try:
ret = f(*args, **kwargs)
finally:
self.flags.writeable = False
if self.base is not None:
self.base.flags.writeable = False
self._record_modify_time() # record time *after* computation
return ret
return f_guarded
# override methods that modify object content to
# record timestamp, signalling need for k-d tree update
# inplace arithmetic methods
# e.g. +=, -=, *=, /=, //=, %=, **=, <<=, >>=, &=, ^=, |=
def __iadd__(self, other):
return self._guard(super(Points, self).__iadd__)(other)
def __isub__(self, other):
return self._guard(super(Points, self).__isub__)(other)
def __imul__(self, other):
return self._guard(super(Points, self).__imul__)(other)
def __idiv__(self, other):
return self._guard(super(Points, self).__idiv__)(other)
def __itruediv__(self, other):
return self._guard(super(Points, self).__itruediv__)(other)
def __ifloordiv__(self, other):
return self._guard(super(Points, self).__ifloordiv__)(other)
def __imod__(self, other):
return self._guard(super(Points, self).__imod__)(other)
def __ipow__(self, other):
return self._guard(super(Points, self).__ipow__)(other)
def __ilshift__(self, other):
return self._guard(super(Points, self).__ilshift__)(other)
def __irshift__(self, other):
return self._guard(super(Points, self).__irshift__)(other)
def __iand__(self, other):
return self._guard(super(Points, self).__iand__)(other)
def __ixor__(self, other):
return self._guard(super(Points, self).__ixor__)(other)
def __ior__(self, other):
return self._guard(super(Points, self).__ior__)(other)
# indexing and slicing operator
def __setslice__(self, i, j, sequence):
return self._guard(super(Points, self).__setslice__)(i, j, sequence)
def __delslice__(self, i, j):
return self._guard(super(Points, self).__delslice__)(i, j)
def __getslice__(self, i, j):
return super(Points, self).__getslice__(i, j)
def __setitem__(self, key, value):
return self._guard(super(Points, self).__setitem__)(key, value)
def __delitem__(self, key):
return self._guard(super(Points, self).__delitem__)(key)
def __getitem__(self, key):
if isinstance(key, expr.expression):
return expr.index_op(
expr._make_expression(self), key, slice(None, None, None))
elif (isinstance(key, tuple) or isinstance(key, list)) \
and any([isinstance(x, expr.expression) for x in key]):
# key is a sequence containing at least one expression object
if len(key) == 2 and (
isinstance(key[0], expr.expression)
and isinstance(key[1], slice)
or isinstance(key[0], slice)
and isinstance(key[1], expr.expression)):
return expr.index_op(
expr._make_expression(self), key[0], key[1])
else:
raise TypeError(
'unsupported combination of types in index tuple: %s'
% repr((type(x) for x in key)))
else:
return super(Points, self).__getitem__(key)
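# Usage sketch (added for illustration; not part of the original module). It shows the
# intended calling convention only; the exact return format of nbhds() depends on the
# kdtree._query backend.
#
#   pts = rand(1000, 3)                     # 1000 random 3-D points (read-only)
#   nbrs = pts.nbhds(queries=pts[:5], k=3)  # 3 nearest neighbours of the first 5 points
#   print(nbrs)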
|
py | b40e4efd1eb8bd51d7ddfb07ce58e343d1adc8d3 | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""Defines helpful model wrapper and utils for implicitly rewrapping the model to conform to explainer contracts."""
import logging
import warnings
import numpy as np
import pandas as pd
from sklearn.linear_model import SGDClassifier
from .constants import ModelTask, SKLearn
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'Starting from version 2.2.1', UserWarning)
module_logger = logging.getLogger(__name__)
module_logger.setLevel(logging.INFO)
try:
import torch
import torch.nn as nn
except ImportError:
module_logger.debug('Could not import torch, required if using a PyTorch model')
class _FunctionWrapper(object):
"""Wraps a function to reshape the input and output data.
:param function: The prediction function to evaluate on the examples.
:type function: function
"""
def __init__(self, function):
"""Wraps a function to reshape the input and output data.
:param function: The prediction function to evaluate on the examples.
:type function: function
"""
self._function = function
def _function_input_1D_wrapper(self, dataset):
"""Wraps a function that reshapes the input dataset to be 2D from 1D.
:param dataset: The model evaluation examples.
:type dataset: numpy.array
:return: A wrapped function.
:rtype: function
"""
if len(dataset.shape) == 1:
dataset = dataset.reshape(1, -1)
return self._function(dataset)
def _function_flatten(self, dataset):
"""Wraps a function that flattens the input dataset from 2D to 1D.
:param dataset: The model evaluation examples.
:type dataset: numpy.array
:return: A wrapped function.
:rtype: function
"""
return self._function(dataset).flatten()
def _function_2D_two_cols_wrapper_2D_result(self, dataset):
"""Wraps a function that creates two columns, [1-p, p], from 2D array of one column evaluation result.
:param dataset: The model evaluation examples.
:type dataset: numpy.array
:return: A wrapped function.
:rtype: function
"""
result = self._function(dataset)[:, 0]
return np.stack([1 - result, result], axis=-1)
def _function_2D_two_cols_wrapper_1D_result(self, dataset):
"""Wraps a function that creates two columns, [1-p, p], from evaluation result that is a 1D array.
:param dataset: The model evaluation examples.
:type dataset: numpy.array
:return: A wrapped function.
:rtype: function
"""
result = self._function(dataset)
return np.stack([1 - result, result], axis=-1)
def _function_2D_one_col_wrapper(self, dataset):
"""Wraps a function that creates one column in rare edge case scenario for multiclass one-class result.
:param dataset: The model evaluation examples.
:type dataset: numpy.array
:return: A wrapped function.
:rtype: function
"""
result = self._function(dataset)
return result.reshape(result.shape[0], 1)
def _convert_to_two_cols(function, examples):
"""In classification case, convert the function's output to two columns if it outputs one column.
:param function: The prediction function to evaluate on the examples.
:type function: function
:param examples: The model evaluation examples.
:type examples: numpy.array or list
:return: The function chosen from given model and classification domain.
:rtype: (function, str)
"""
# Add wrapper function to convert output to 2D array, check values to decide on whether
# to throw, or create two columns [1-p, p], or leave just one in multiclass one-class edge-case
result = function(examples)
# If the function gives a 2D array of one column, we will need to reshape it prior to concat
is_2d_result = len(result.shape) == 2
convert_to_two_cols = False
for value in result:
if value < 0 or value > 1:
raise Exception("Probability values outside of valid range: " + str(value))
if value < 1:
convert_to_two_cols = True
wrapper = _FunctionWrapper(function)
if convert_to_two_cols:
# Create two cols, [1-p, p], from evaluation result
if is_2d_result:
return (wrapper._function_2D_two_cols_wrapper_2D_result, ModelTask.Classification)
else:
return (wrapper._function_2D_two_cols_wrapper_1D_result, ModelTask.Classification)
else:
if is_2d_result:
return (function, ModelTask.Classification)
else:
return (wrapper._function_2D_one_col_wrapper, ModelTask.Classification)
class WrappedPytorchModel(object):
"""A class for wrapping a PyTorch model in the scikit-learn specification."""
def __init__(self, model):
"""Initialize the PytorchModelWrapper with the model and evaluation function."""
self._model = model
# Set eval automatically for user for batchnorm and dropout layers
self._model.eval()
def predict(self, dataset):
"""Predict the output using the wrapped PyTorch model.
:param dataset: The dataset to predict on.
:type dataset: interpret_community.dataset.dataset_wrapper.DatasetWrapper
"""
# Convert the data to pytorch Variable
if isinstance(dataset, pd.DataFrame):
dataset = dataset.values
wrapped_dataset = torch.Tensor(dataset)
with torch.no_grad():
result = self._model(wrapped_dataset).numpy()
# Reshape to 2D if output is 1D and input has one row
if len(dataset.shape) == 1:
result = result.reshape(1, -1)
return result
def predict_classes(self, dataset):
"""Predict the class using the wrapped PyTorch model.
:param dataset: The dataset to predict on.
:type dataset: interpret_community.dataset.dataset_wrapper.DatasetWrapper
"""
# Convert the data to pytorch Variable
if isinstance(dataset, pd.DataFrame):
dataset = dataset.values
wrapped_dataset = torch.Tensor(dataset)
with torch.no_grad():
result = self._model(wrapped_dataset)
result_len = len(result.shape)
if result_len == 1 or (result_len > 1 and result.shape[1] == 1):
result = np.where(result.numpy() > 0.5, 1, 0)
else:
result = torch.max(result, 1)[1].numpy()
# Reshape to 2D if output is 1D and input has one row
if len(dataset.shape) == 1:
result = result.reshape(1, -1)
return result
def predict_proba(self, dataset):
"""Predict the output probability using the wrapped PyTorch model.
:param dataset: The dataset to predict_proba on.
:type dataset: interpret_community.dataset.dataset_wrapper.DatasetWrapper
"""
return self.predict(dataset)
class BaseWrappedModel(object):
"""A base class for WrappedClassificationModel and WrappedRegressionModel."""
def __init__(self, model, eval_function, examples, model_task):
"""Initialize the WrappedClassificationModel with the model and evaluation function."""
self._eval_function = eval_function
self._model = model
self._examples = examples
self._model_task = model_task
def __getstate__(self):
"""Influence how BaseWrappedModel is pickled.
Removes _eval_function which may not be serializable.
:return state: The state to be pickled, with _eval_function removed.
:rtype: dict
"""
odict = self.__dict__.copy()
if self._examples is not None:
del odict['_eval_function']
return odict
def __setstate__(self, state):
"""Influence how BaseWrappedModel is unpickled.
Re-adds _eval_function which may not be serializable.
:param dict: A dictionary of deserialized state.
:type dict: dict
"""
self.__dict__.update(state)
if self._examples is not None:
eval_function, _ = _eval_model(self._model, self._examples, self._model_task)
self._eval_function = eval_function
class WrappedClassificationModel(BaseWrappedModel):
"""A class for wrapping a classification model."""
def __init__(self, model, eval_function, examples=None):
"""Initialize the WrappedClassificationModel with the model and evaluation function."""
super(WrappedClassificationModel, self).__init__(model, eval_function, examples, ModelTask.Classification)
def predict(self, dataset):
"""Predict the output using the wrapped classification model.
:param dataset: The dataset to predict on.
:type dataset: interpret_community.dataset.dataset_wrapper.DatasetWrapper
"""
is_sequential = str(type(self._model)).endswith("tensorflow.python.keras.engine.sequential.Sequential'>")
if is_sequential or isinstance(self._model, WrappedPytorchModel):
return self._model.predict_classes(dataset).flatten()
preds = self._model.predict(dataset)
if isinstance(preds, pd.DataFrame):
preds = preds.values.ravel()
# Handle possible case where the model has only a predict function and it outputs probabilities
# Note this is different from WrappedClassificationWithoutProbaModel where there is no predict_proba
# method but the predict method outputs classes
has_predict_proba = hasattr(self._model, SKLearn.PREDICT_PROBA)
if not has_predict_proba:
if len(preds.shape) == 1:
return np.argmax(preds)
else:
return np.argmax(preds, axis=1)
return preds
def predict_proba(self, dataset):
"""Predict the output probability using the wrapped model.
:param dataset: The dataset to predict_proba on.
:type dataset: interpret_community.dataset.dataset_wrapper.DatasetWrapper
"""
proba_preds = self._eval_function(dataset)
if isinstance(proba_preds, pd.DataFrame):
proba_preds = proba_preds.values
return proba_preds
class WrappedRegressionModel(BaseWrappedModel):
"""A class for wrapping a regression model."""
def __init__(self, model, eval_function, examples=None):
"""Initialize the WrappedRegressionModel with the model and evaluation function."""
super(WrappedRegressionModel, self).__init__(model, eval_function, examples, ModelTask.Regression)
def predict(self, dataset):
"""Predict the output using the wrapped regression model.
:param dataset: The dataset to predict on.
:type dataset: interpret_community.dataset.dataset_wrapper.DatasetWrapper
"""
preds = self._eval_function(dataset)
if isinstance(preds, pd.DataFrame):
preds = preds.values.ravel()
return preds
class WrappedClassificationWithoutProbaModel(object):
"""A class for wrapping a classifier without a predict_proba method.
Note: the classifier may not output numeric values for its predictions.
We generate a trival boolean version of predict_proba
"""
def __init__(self, model):
"""Initialize the WrappedClassificationWithoutProbaModel with the model."""
self._model = model
# Create a map from classes to index
self._classes_to_index = {}
for index, i in enumerate(self._model.classes_):
self._classes_to_index[i] = index
self._num_classes = len(self._model.classes_)
def predict(self, dataset):
"""Predict the output using the wrapped regression model.
:param dataset: The dataset to predict on.
:type dataset: interpret_community.dataset.dataset_wrapper.DatasetWrapper
"""
return self._model.predict(dataset)
def predict_proba(self, dataset):
"""Predict the output probability using the wrapped model.
:param dataset: The dataset to predict_proba on.
:type dataset: interpret_community.dataset.dataset_wrapper.DatasetWrapper
"""
predictions = self.predict(dataset)
# Generate trivial boolean array for predictions
probabilities = np.zeros((predictions.shape[0], self._num_classes))
for row_idx, pred_class in enumerate(predictions):
class_index = self._classes_to_index[pred_class]
probabilities[row_idx, class_index] = 1
return probabilities
def wrap_model(model, examples, model_task):
"""If needed, wraps the model in a common API based on model task and prediction function contract.
:param model: The model to evaluate on the examples.
:type model: model with a predict or predict_proba function.
:param examples: The model evaluation examples.
:type examples: interpret_community.dataset.dataset_wrapper.DatasetWrapper
:param model_task: Optional parameter to specify whether the model is a classification or regression model.
In most cases, the type of the model can be inferred based on the shape of the output, where a classifier
has a predict_proba method and outputs a 2 dimensional array, while a regressor has a predict method and
outputs a 1 dimensional array.
:type model_task: str
:return: The wrapper model.
:rtype: model
"""
return _wrap_model(model, examples, model_task, False)[0]
def _wrap_model(model, examples, model_task, is_function):
"""If needed, wraps the model or function in a common API based on model task and prediction function contract.
:param model: The model or function to evaluate on the examples.
:type model: function or model with a predict or predict_proba function
:param examples: The model evaluation examples.
:type examples: interpret_community.dataset.dataset_wrapper.DatasetWrapper
:param model_task: Optional parameter to specify whether the model is a classification or regression model.
In most cases, the type of the model can be inferred based on the shape of the output, where a classifier
has a predict_proba method and outputs a 2 dimensional array, while a regressor has a predict method and
outputs a 1 dimensional array.
:type model_task: str
:return: The function chosen from given model and chosen domain, or model wrapping the function and chosen domain.
:rtype: (function, str) or (model, str)
"""
if is_function:
return _eval_function(model, examples, model_task)
else:
try:
if isinstance(model, nn.Module):
# Wrap the model in an extra layer that converts the numpy array
# to pytorch Variable and adds predict and predict_proba functions
model = WrappedPytorchModel(model)
except (NameError, AttributeError):
module_logger.debug('Could not import torch, required if using a pytorch model')
if _classifier_without_proba(model):
model = WrappedClassificationWithoutProbaModel(model)
eval_function, eval_ml_domain = _eval_model(model, examples, model_task)
if eval_ml_domain == ModelTask.Classification:
return WrappedClassificationModel(model, eval_function, examples), eval_ml_domain
else:
return WrappedRegressionModel(model, eval_function, examples), eval_ml_domain
def _classifier_without_proba(model):
"""Returns True if the given model is a classifier without predict_proba, eg SGDClassifier.
:param model: The model to evaluate on the examples.
:type model: model with a predict or predict_proba function
:return: True if the given model is a classifier without predict_proba.
:rtype: bool
"""
return isinstance(model, SGDClassifier) and not hasattr(model, SKLearn.PREDICT_PROBA)
def _eval_model(model, examples, model_task):
"""Return function from model and specify the ML Domain using model evaluation on examples.
:param model: The model to evaluate on the examples.
:type model: model with a predict or predict_proba function
:param examples: The model evaluation examples.
:type examples: interpret_community.dataset.dataset_wrapper.DatasetWrapper
:param model_task: Optional parameter to specify whether the model is a classification or regression model.
In most cases, the type of the model can be inferred based on the shape of the output, where a classifier
has a predict_proba method and outputs a 2 dimensional array, while a regressor has a predict method and
outputs a 1 dimensional array.
:type model_task: str
:return: The function chosen from given model and chosen domain.
:rtype: (function, str)
"""
# TODO: Add more model types here
is_sequential = str(type(model)).endswith("tensorflow.python.keras.engine.sequential.Sequential'>")
if is_sequential or isinstance(model, WrappedPytorchModel):
if model_task == ModelTask.Regression:
return _eval_function(model.predict, examples, ModelTask.Regression)
result = model.predict_proba(examples.typed_wrapper_func(examples.dataset[0:1]))
if result.shape[1] == 1 and model_task == ModelTask.Unknown:
raise Exception("Please specify model_task to disambiguate model type since "
"result of calling function is 2D array of one column.")
else:
return _eval_function(model.predict_proba, examples, ModelTask.Classification)
else:
has_predict_proba = hasattr(model, SKLearn.PREDICT_PROBA)
# Note: Allow user to override default to use predict method for regressor
if has_predict_proba and model_task != ModelTask.Regression:
return _eval_function(model.predict_proba, examples, model_task)
else:
return _eval_function(model.predict, examples, model_task)
def _eval_function(function, examples, model_task, wrapped=False):
"""Return function and specify the ML Domain using function evaluation on examples.
:param function: The prediction function to evaluate on the examples.
:type function: function
:param examples: The model evaluation examples.
:type examples: interpret_community.dataset.dataset_wrapper.DatasetWrapper
:param model_task: Optional parameter to specify whether the model is a classification or regression model.
In most cases, the type of the model can be inferred based on the shape of the output, where a classifier
has a predict_proba method and outputs a 2 dimensional array, while a regressor has a predict method and
outputs a 1 dimensional array.
:type model_task: str
:param wrapped: Indicates if function has already been wrapped.
:type wrapped: bool
:return: The function chosen from given model and chosen domain.
:rtype: (function, str)
"""
# Try to run the function on a single example - if it doesn't work wrap
# it in a function that converts a 1D array to 2D for those functions
# that only support 2D arrays as input
examples_dataset = examples.dataset
if str(type(examples_dataset)).endswith(".DenseData'>"):
examples_dataset = examples_dataset.data
try:
result = function(examples.typed_wrapper_func(examples_dataset[0]))
if result is None:
raise Exception("Wrapped function returned None in model wrapper when called on dataset")
except Exception as ex:
# If function has already been wrapped, re-throw error to prevent stack overflow
if wrapped:
raise ex
wrapper = _FunctionWrapper(function)
return _eval_function(wrapper._function_input_1D_wrapper, examples, model_task, wrapped=True)
if len(result.shape) == 2:
        # If the result of evaluating the function is a 2D array of 1 column,
# and they did not specify classifier or regressor, throw exception
# to force the user to disambiguate the results.
if result.shape[1] == 1:
if model_task == ModelTask.Unknown:
if isinstance(result, pd.DataFrame):
return (function, ModelTask.Regression)
raise Exception("Please specify model_task to disambiguate model type since "
"result of calling function is 2D array of one column.")
elif model_task == ModelTask.Classification:
return _convert_to_two_cols(function, examples_dataset)
else:
# model_task == ModelTask.Regression
# In case user specified a regressor but we have a 2D output with one column,
# we want to flatten the function to 1D
wrapper = _FunctionWrapper(function)
return (wrapper._function_flatten, model_task)
else:
if model_task == ModelTask.Unknown or model_task == ModelTask.Classification:
return (function, ModelTask.Classification)
else:
raise Exception("Invalid shape for prediction: "
"Regression function cannot output 2D array with multiple columns")
elif len(result.shape) == 1:
if model_task == ModelTask.Unknown:
return (function, ModelTask.Regression)
elif model_task == ModelTask.Classification:
return _convert_to_two_cols(function, examples_dataset)
return (function, model_task)
elif len(result.shape) == 0:
# single value returned, flatten output array
wrapper = _FunctionWrapper(function)
return (wrapper._function_flatten, model_task)
raise Exception("Failed to wrap function, may require custom wrapper for input function or model")
|
py | b40e4f77f1bf0fc756cd1f58d68fe660915107cf | # -*- coding: utf-8 -*-
r"""
malgan.detector
~~~~~~~~~~~~~~~
Black box malware detector.
Based on the paper: "Generating Adversarial Malware Examples for Black-Box Attacks Based on GAN"
By Weiwei Hu and Ying Tan.
:copyright: (c) 2019 by Zayd Hammoudeh.
:license: MIT, see LICENSE for more details.
"""
from enum import Enum
from typing import Union
import numpy as np
import sklearn
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
import torch
TorchOrNumpy = Union[np.ndarray, torch.Tensor]
# noinspection PyPep8Naming
class BlackBoxDetector:
r"""
    Black box detector that mimics an antivirus/anti-malware program by classifying
    a specific program as either malware or benign.
"""
class Type(Enum):
r""" Learner algorithm to be used by the black-box detector """
DecisionTree = DecisionTreeClassifier()
LogisticRegression = LogisticRegression(solver='lbfgs', max_iter=int(1e6))
MultiLayerPerceptron = MLPClassifier()
RandomForest = RandomForestClassifier(n_estimators=100)
SVM = SVC(gamma="auto")
@staticmethod
def names():
r""" Builds the list of all enum names """
return [c.name for c in BlackBoxDetector.Type]
@staticmethod
def get_from_name(name):
r"""
Gets the enum item from the specified name
:param name: Name of the enum object
:return: Enum item associated with the specified name
"""
for c in BlackBoxDetector.Type:
if c.name == name:
return c
raise ValueError("Unknown enum \"%s\" for class \"%s\"", name, __class__.name)
def __init__(self, learner_type: 'BlackBoxDetector.Type'):
self.type = learner_type
# noinspection PyCallingNonCallable
self._model = sklearn.clone(self.type.value)
self.training = True
def fit(self, X: TorchOrNumpy, y: TorchOrNumpy):
r"""
        Fits the learner. Supports NumPy and PyTorch arrays as input.
:param X: Examples upon which to train
:param y: Labels for the examples
"""
if isinstance(X, torch.Tensor):
X = X.numpy()
if isinstance(y, torch.Tensor):
y = y.numpy()
self._model.fit(X, y)
self.training = False
    def predict(self, X: TorchOrNumpy) -> torch.Tensor:
r"""
Predict the labels for \p X
:param X: Set of examples for which label probabilities should be predicted
:return: Predicted value for \p X
"""
if self.training:
raise ValueError("Detector does not appear to be trained but trying to predict")
if torch.cuda.is_available():
X = X.cpu()
if isinstance(X, torch.Tensor):
X = X.numpy()
y = torch.from_numpy(self._model.predict(X)).float()
return y.cuda() if torch.cuda.is_available() else y
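# --- Hedged usage sketch (added; not part of the original module) ---
# Shows the fit/predict round trip of BlackBoxDetector on synthetic binary
# feature vectors; the data, seed, and learner choice are illustrative only.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    X = torch.from_numpy(rng.integers(0, 2, size=(64, 16)).astype(np.float32))
    y = torch.from_numpy(rng.integers(0, 2, size=64).astype(np.float32))
    detector = BlackBoxDetector(BlackBoxDetector.Type.RandomForest)
    detector.fit(X, y)
    print(detector.predict(X).shape)  # expected: torch.Size([64])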
|
py | b40e4f7e84bc53160bafd291d5c8ea6b4b1f43bd | from Kaspa.modules.abstract_modules.abstractSubmodule import AbstractSubmodule
from Kaspa.modules.exceptions.impossibleActionError import ImpossibleActionError
from Kaspa.config import Config
class SpotifyModuleEn(AbstractSubmodule):
module_name = "Spotify"
language = "en"
key_regexes = dict()
def __init__(self):
self.key_regexes = {'(?i).*?(?=continue)+.+?(?=playback)+.': self.action_continue_playback,
                            '(?i).*?(?=pause)+.': self.action_pause,
'(?i).*?(?=play)+.': self.action_play,
'(?i).*?(?=next)+.': self.action_next,
'(?i).*?(?=stop)+.': self.action_pause,
'(?i).*?(?=what)+.+?(?=song)+.': self.action_song_info}
def action_continue_playback(self, query):
communicator = query.get_communicator()
self.main_module.continue_playback()
communicator.say("I am now continuing your music playback.")
return
def action_pause(self, query):
communicator = query.get_communicator()
self.main_module.pause()
communicator.say("Music paused.")
return
def action_play(self, query):
communicator = query.get_communicator()
text = query.get_text()
try:
self.action_continue_playback(query)
return
except ImpossibleActionError:
pass
if self.main_module.current_song() is None:
self.main_module.play_saved()
communicator.say("Okay, playing your last added songs.")
return
# fetch all playlist macros from config file and search for matches in the query
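        # Hedged note (added for clarity): this assumes the 'playlists' section
        # yields (name, uri) pairs, e.g. from a hypothetical config such as
        #   [playlists]
        #   Focus = spotify:playlist:EXAMPLEID123
        # so a query like "play my Focus playlist" matches the macro name below
        # and plays the associated URI.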
playlists = Config.get_instance().get_section_content('playlists')
for playlist in playlists:
if playlist[0].lower() in text.lower():
self.main_module.play_playlist(playlist[1])
communicator.say("Okay, I'll now play the playlist" + playlist[0] + ".")
return
self.main_module.play()
communicator.say("Okay")
return
def action_next(self, query):
communicator = query.get_communicator()
self.main_module.next()
communicator.say("Okay")
return
def action_song_info(self, query):
communicator = query.get_communicator()
if self.main_module.current_song():
title, artist = self.main_module.current_song()
communicator.say("The song is " + title + " by " + artist + ".")
else:
communicator.say("There is no music loaded right now.")
|
py | b40e4fe16af0e0bb85b223583775529c7f1f32a3 | from django.contrib import admin
from .models import Comments, Profile,Image
# Register your models here.
admin.site.register(Profile)
admin.site.register(Image)
admin.site.register(Comments)
|
py | b40e501e0578830eb8ce16f4c59b15626a298d58 | # tested
def Main(j):
if j == 4:
raise Exception('hello')
return True
|
py | b40e51ea73931f499f7c27572eb42af13060e381 | #!/usr/bin/env python
"""
@author: Rohini Joshi
"""
import sys
import time
from DIRAC.Core.Base import Script
Script.parseCommandLine()
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.Interfaces.API.Dirac import Dirac
# TODO Use DIRAC python API to construct job
jdl = ''
jdl += 'JobName = "SKA SRC test payload";\n'
# TODO Use DIRAC API to get available sites instead of hard-coded list
jdl += """Parameters = {"LCG.UKI-NORTHGRID-MAN-HEP.uk", "LCG.UKI-LT2-IC-HEP.uk", "LCG.UKI-LT2-QMUL.uk", "LCG.UKI-NORTHGRID-LANCS-HEP.uk",
"CLOUD.CERN-PROD.ch", "CLOUD.RAL-LCG2.uk", "CLOUD.UK-CAM-CUMULUS.uk", "VAC.UKI-LT2-UCL-HEP.uk",
"VAC.UKI-NORTHGRID-MAN-HEP.uk", "VAC.UKI-NORTHGRID-LIV-HEP.uk", "VAC.UKI-SCOTGRID-GLASGOW.uk"};\n"""
jdl += 'Site = "%s";\n'
jdl += 'Platform = "EL7";\n'
jdl += 'Executable = "TestPayload.sh";\n'
jdl += 'InputSandbox = "TestPayload.sh";\n'
jdl += 'StdOutput = "StdOut";\n'
jdl += 'StdError = "StdErr";\n'
jdl += 'OutputSandbox = {"StdOut", "StdErr"};\n'
# Create a unique Job Group for this set of jobs
try:
diracUsername = getProxyInfo()['Value']['username']
except:
print 'Failed to get DIRAC username. No proxy set up?'
sys.exit(1)
jobGroup = diracUsername + time.strftime('.%Y%m%d%H%M%S')
jdl += 'JobGroup = "' + jobGroup + '";\n'
print 'Will submit this DIRAC JDL:'
print '====='
print jdl
print '====='
print
# Submit the job(s)
print 'Attempting to submit job(s) in JobGroup ' + jobGroup
print
dirac = Dirac()
result = dirac.submitJob(jdl)
print '====='
print 'Submission Result: ',result
print '====='
if result['OK']:
print 'Retrieve output with dirac-wms-job-get-output --JobGroup ' + jobGroup
else:
print 'There was a problem submitting your job(s) - see above!!!'
print
|
py | b40e53901a49b5a460acc0594d2e40eb5958ac3a | from random import expovariate
Avg_IAT = 2.0 # Average IAT
Sim_Time = 100 # Total simulation time
N = 0 # Count number of arrivals
clock = 0 # Simulation time
while clock <= Sim_Time:
N = N + 1
# Advance simulation clock
clock = clock + expovariate(1/Avg_IAT)
print('Total Number of Arrivals = ', N) |
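# Note (added): with exponential inter-arrival times of mean Avg_IAT, the expected
# count is roughly Sim_Time / Avg_IAT = 50 arrivals, plus the final arrival that
# pushes the clock past Sim_Time.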
py | b40e53ce8037bfa8fc74ddfad5657d9fea351d1d | # Copyright 2013 Devsim LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#### circ4.tcl
####
#### copy of circ1.tcl using a voltage source
import devsim
import test_common
devices=("MyDevice1", "MyDevice2")
region="MyRegion"
####
#### Meshing
####
test_common.CreateSimpleMesh(device=devices[0], region=region)
devsim.create_device(mesh="dog", device=devices[1])
for device in devices:
test_common.SetupResistorConstants(device=device, region="")
test_common.SetupInitialResistorSystem(device, region)
devsim.add_circuit_node(name="cnode0", variable_update="default")
devsim.add_circuit_node(name="cnode1", variable_update="default")
devsim.circuit_element(name="R1", n1="cnode1", n2=0, value=1e15)
devsim.circuit_element(name="V1", n1="cnode0", n2=0, value=0.0)
test_common.SetupInitialResistorContact(device="MyDevice1", contact="top", use_circuit_bias=True, circuit_node="MyDevice1_top")
test_common.SetupInitialResistorContact(device="MyDevice1", contact="bot", use_circuit_bias=True, circuit_node="MyDevice1_bot")
test_common.SetupInitialResistorContact(device="MyDevice2", contact="top", use_circuit_bias=True, circuit_node="MyDevice2_top")
test_common.SetupInitialResistorContact(device="MyDevice2", contact="bot", use_circuit_bias=True, circuit_node="MyDevice2_bot")
devsim.circuit_node_alias(node="cnode0", alias="MyDevice1_top")
devsim.circuit_node_alias(node="cnode1", alias="MyDevice2_top")
devsim.circuit_node_alias(node="cnode1", alias="MyDevice1_bot")
devsim.circuit_node_alias(node="GND", alias="MyDevice2_bot")
devsim.solve(type="dc", absolute_error=1.0, relative_error=1e-14, maximum_iterations=30)
for device in devices:
print(device)
for name in ("Potential", "IntrinsicElectrons"):
devsim.print_node_values(device=device, region=region, name=name)
for device in devices:
test_common.SetupCarrierResistorSystem(device=device, region=region)
test_common.SetupCarrierResistorContact(device="MyDevice1", contact="top", use_circuit_bias=True, circuit_node="MyDevice1_top")
test_common.SetupCarrierResistorContact(device="MyDevice1", contact="bot", use_circuit_bias=True, circuit_node="MyDevice1_bot")
test_common.SetupCarrierResistorContact(device="MyDevice2", contact="top", use_circuit_bias=True, circuit_node="MyDevice2_top")
test_common.SetupCarrierResistorContact(device="MyDevice2", contact="bot", use_circuit_bias=True, circuit_node="MyDevice2_bot")
for v in (0.0, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10):
devsim.circuit_alter(name="V1", value=v)
devsim.solve(type="dc", absolute_error=1.0, relative_error=1e-9, maximum_iterations=30)
for device in devices:
test_common.printResistorCurrent(device=device, contact="top")
test_common.printResistorCurrent(device=device, contact="bot")
|
py | b40e550251748df8bc9ccd41d69e1a7b000b26bc | """SCons.Tool.Packaging.tarbz2
The tarbz2 SRC packager.
"""
#
# Copyright (c) 2001 - 2017 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/packaging/src_tarbz2.py rel_3.0.0:4395:8972f6a2f699 2017/09/18 12:59:24 bdbaddog"
from SCons.Tool.packaging import putintopackageroot
def package(env, target, source, PACKAGEROOT, **kw):
bld = env['BUILDERS']['Tar']
bld.set_suffix('.tar.bz2')
target, source = putintopackageroot(target, source, env, PACKAGEROOT, honor_install_location=0)
return bld(env, target, source, TARFLAGS='-jc')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
py | b40e55454639fdf541783dc87b8cbd28014d9b56 | # Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import cuxfilter
from cuxfilter.layouts import feature_and_double_base
from clx.eda.summary_stats import SummaryStatistics
class EDA:
"""An EDA (Exploratory Data Analysis) Object. EDA is used to explore different features of a given dataframe.
:param dataframe: Dataframe to be used for analysis
:type dataframe: cudf.DataFrame
Examples
--------
>>> from clx.eda import EDA
>>> import cudf
>>> import pandas as pd
>>> df = cudf.DataFrame()
>>> df['a'] = [1,2,3,4]
>>> df['b'] = ['a','b','c','c']
>>> df['c'] = [True, False, True, True]
>>> df['d'] = cudf.Series(pd.date_range("2000-01-01", periods=3,freq="m"))
>>> eda = EDA(df)
>>> eda
{
"SummaryStatistics": {
"a": {
"dtype": "int64",
"summary": {
"unique": "4",
"total": "4"
}
},
"b": {
"dtype": "object",
"summary": {
"unique": "3",
"total": "4"
}
},
"c": {
"dtype": "bool",
"summary": {
"true_percent": "0.75"
}
},
"d": {
"dtype": "datetime64[ns]",
"summary": {
"timespan": "60 days, 2880 hours, 0 minutes, 0 seconds"
}
}
}
}
"""
eda_modules = {"SummaryStatistics": SummaryStatistics}
def __init__(self, dataframe):
self.__dataframe = dataframe
self.__analysis, self.__module_ref = self.__generate_analysis(dataframe)
@property
def analysis(self):
"""
Analysis results as a `dict`
"""
return self.__analysis
@property
def dataframe(self):
"""
Dataframe used for analysis
"""
return self.__dataframe
def __repr__(self):
return json.dumps(self.analysis, indent=2)
def __generate_analysis(self, dataframe):
"""For each of the modules, generate the analysis"""
module_ref = {}
analysis_results = {}
for key, eda_module in self.eda_modules.items():
eda_module_obj = eda_module(dataframe)
module_ref[key] = eda_module_obj
analysis_results[key] = eda_module_obj.analysis
return analysis_results, module_ref
def save_analysis(self, dirpath):
"""Save analysis output to directory path.
:param dirpath: Directory path to save analysis output.
:type dirpath: str
"""
for key, analysis in self.__module_ref.items():
if os.path.isdir(dirpath):
output_file = dirpath + "/" + key
analysis.save_analysis(output_file)
def cuxfilter_dashboard(self):
"""Create cuxfilter dashboard for Exploratory Data Analysis.
:return: cuxfilter dashboard with populated with data and charts.
:rtype: cuxfilter.DashBoard
"""
for module in self.__module_ref.values():
charts = module.charts
cux_df = cuxfilter.DataFrame.from_dataframe(self.__dataframe)
return cux_df.dashboard(
charts,
layout=feature_and_double_base,
theme=cuxfilter.themes.light,
title="Exploratory Data Analysis",
)
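# Hedged usage sketch (added; not part of the original module): persisting the
# analysis and building the dashboard, assuming `df` is a cudf.DataFrame as in
# the class docstring above. Shown as comments because it needs a GPU stack.
# eda = EDA(df)
# eda.save_analysis("./eda_output")      # one output file per analysis module
# dashboard = eda.cuxfilter_dashboard()  # interactive cuxfilter dashboard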
|
py | b40e55568ed8b178147621fc15e843737a350b84 | import urllib.parse
from . import zeep
__all__ = ("AsyncNetSuiteTransport",)
# TODO: ASYNC! Maybe remove this custom transport?!?!
class AsyncNetSuiteTransport(zeep.transports.AsyncTransport):
"""
    NetSuite company-specific domain wrapper for the zeep transport.
    The latest NetSuite WSDL uses relative definition addresses, and zeep maps
    reflective remote calls to the base WSDL address rather than the dynamic
    subscriber domain. This class wraps the zeep transport with our address
    modifications.
"""
def __init__(self, wsdl_url, *args, **kwargs):
parsed = urllib.parse.urlparse(wsdl_url)
self._netsuite_base_url = f"{parsed.scheme}://{parsed.netloc}"
super().__init__(*args, **kwargs)
def _fix_address(self, address):
"""Munge the address to the company-specific domain, not the default"""
idx = address.index("/", 8)
path = address[idx:]
return f"{self._netsuite_base_url}{path}"
async def get(self, address, params, headers):
return await super().get(self._fix_address(address), params, headers)
async def post(self, address, message, headers):
return await super().post(self._fix_address(address), message, headers)
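# --- Hedged illustration (added; not part of the original module) ---
# _fix_address keeps the path of the WSDL-declared endpoint but swaps in the
# company-specific host captured from the WSDL URL at construction time.
# A standalone sketch of that rule, using hypothetical hosts and paths:
if __name__ == "__main__":
    base = "https://1234567.suitetalk.api.netsuite.com"  # hypothetical account host
    address = "https://webservices.netsuite.com/services/NetSuitePort_2021_2"  # hypothetical WSDL address
    idx = address.index("/", 8)  # first "/" after "https://host"
    print(f"{base}{address[idx:]}")
    # -> https://1234567.suitetalk.api.netsuite.com/services/NetSuitePort_2021_2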
|
py | b40e55d363dac86a5dbcc7442fac3ea75ebe9026 | # Create a queue, using just stacks
class Stack:
def __init__(self):
self.items = []
def size(self):
return len(self.items)
def push(self, item):
self.items.append(item)
def pop(self):
if self.size() == 0:
return None
else:
return self.items.pop()
class Queue:
def __init__(self):
self.instorage = Stack()
self.outstorage = Stack()
def size(self):
return self.outstorage.size() + self.instorage.size()
def enqueue(self, item):
self.instorage.push(item)
def dequeue(self):
if not self.outstorage.items:
while self.instorage.items:
self.outstorage.push(self.instorage.pop())
return self.outstorage.pop()
# Setup
q = Queue()
q.enqueue(1)
q.enqueue(2)
q.enqueue(3)
# Test size
print("Pass" if (q.size() == 3) else "Fail")
# Test dequeue
print("Pass" if (q.dequeue() == 1) else "Fail")
# Test enqueue
q.enqueue(4)
print("Pass" if (q.dequeue() == 2) else "Fail")
print("Pass" if (q.dequeue() == 3) else "Fail")
print("Pass" if (q.dequeue() == 4) else "Fail")
q.enqueue(5)
print("Pass" if (q.size() == 1) else "Fail")
|
py | b40e55d6ce716c69d74c62153b0c8870829d0ddf | #!/usr/bin/env python
''' This python script will be used to detect the colour of packages using QR code detection. '''
import sys
import rospy
from std_msgs.msg import String
from std_srvs.srv import Empty
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
from pkg_task5.srv import camera_packages
from pkg_task5.srv import camera_packagesResponse
import actionlib
from pkg_ros_iot_bridge.msg import msgRosIotAction
# Message Class that is used by ROS Actions internally
from pkg_ros_iot_bridge.msg import msgRosIotGoal
# Message Class that is used for Action Goal Messages
from pkg_ros_iot_bridge.msg import msgRosIotResult
# Message Class that is used for Action Result Messages
from pkg_ros_iot_bridge.msg import msgRosIotFeedback
# Message Class that is used for Action Feedback Messages
from pkg_ros_iot_bridge.msg import msgMqttSub
# Message Class for MQTT Subscription Messages
from pyzbar.pyzbar import decode
import cv2
''' This class will initialize the 2D camera. '''
class Camera1:
def __init__(self):
self.bridge = CvBridge()
self.image_sub = rospy.Subscriber("/eyrc/vb/camera_1/image_raw",
Image, self.camera1_callback)
## MQTT Client
# Initialize Action Client
self._ac = actionlib.ActionClient('/action_ros_iot',
msgRosIotAction)
param_config_iot = rospy.get_param('config_pyiot')
# Store the ROS Topic to get the start message from bridge action server
self._config_mqtt_pub_topic = param_config_iot['mqtt']['topic_pub']
# Dictionary to Store all the goal handels
self._goal_handles = {}
# Wait for Action Server that will use the action - '/action_iot_ros' to start
self._ac.wait_for_server()
rospy.loginfo("Action server up, we can send goals.")
self.image_captured_flag = False
''' Scanning the QR code of each package. '''
def get_qr_data(self,image):
packages_locations = [[305, 415,118, 228],[305, 415,305,415], [305, 415,500,595], [487, 597, 118, 228],
[487, 597,305, 415],[487, 597,495, 605], [635, 742, 118, 228], [635, 742, 305, 415],
[635, 742,495, 605], [785, 895, 118, 228],[785, 895, 310, 410], [785, 895,495, 605]]
packages = ['packagen00', 'packagen01', 'packagen02', 'packagen10', 'packagen11', 'packagen12',
'packagen20', 'packagen21', 'packagen22', 'packagen30', 'packagen31', 'packagen32']
colours = {}
for i in range(len(packages)):
start_x = packages_locations[i][0]
end_x = packages_locations[i][1]
start_y = packages_locations[i][2]
end_y = packages_locations[i][3]
cropped_image = image[start_x:end_x, start_y: end_y]
pack = packages[i]
qr_result = decode(cropped_image)
if len(qr_result)>0:
colours[pack] = qr_result[0].data
else:
continue
self._pc = colours
print 'Packages Detected = ' + str(len(self._pc))
    ''' Callback that converts the ROS image and triggers QR decoding. '''
def camera1_callback(self, data):
if not self.image_captured_flag:
try:
cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
rospy.logerr(e)
self.get_qr_data(cv_image)
self.image_captured_flag = True
    ''' Service callback to send each package's position and colour. '''
def callback_service_on_request(self, req):
if req.get_package_type == True:
self.pack = str(self._pc)
return camera_packagesResponse(self.pack)
else:
rospy.loginfo_once("Package not found")
return camera_packagesResponse("")
''' This function will be called when there is a change of state in the Action Client State Machine. '''
def on_transition(self, goal_handle):
# from on_goal() to on_transition(). goal_handle generated by send_goal() is used here.
result = msgRosIotResult()
index = 0
for i in self._goal_handles:
if self._goal_handles[i] == goal_handle:
index = i
break
rospy.loginfo("Transition Callback. Client Goal Handle #: " + str(index))
rospy.loginfo("Comm. State: " + str(goal_handle.get_comm_state()))
rospy.loginfo("Goal Status: " + str(goal_handle.get_goal_status()))
# Comm State - Monitors the State Machine of the Client which is different from Server's
# Comm State = 2 -> Active
# Comm State = 3 -> Wating for Result
# Comm State = 7 -> Done
# if (Comm State == ACTIVE)
if goal_handle.get_comm_state() == 2:
rospy.loginfo(str(index) + ": Goal just went active.")
# if (Comm State == DONE)
if goal_handle.get_comm_state() == 7:
rospy.loginfo(str(index) + ": Goal is DONE")
rospy.loginfo(goal_handle.get_terminal_state())
# get_result() gets the result produced by the Action Server
result = goal_handle.get_result()
rospy.loginfo(result.flag_success)
if result.flag_success == True:
rospy.loginfo("Goal successfully completed. Client Goal Handle #: " + str(index))
else:
rospy.loginfo("Goal failed. Client Goal Handle #: " + str(index))
    ''' This function is used to send goals to the MQTT client. '''
def send_goal_to_mqtt_client(self, arg_protocol, arg_mode, arg_topic, arg_message):
# Create a Goal Message object
goal = msgRosIotGoal()
goal.protocol = arg_protocol
goal.mode = arg_mode
goal.topic = arg_topic
goal.message = arg_message
rospy.loginfo("Sending to mqtt client")
# self.on_transition - It is a function pointer to a function which will be called when
# there is a change of state in the Action Client State Machine
goal_handle = self._ac.send_goal(goal,
self.on_transition,
None)
return goal_handle
''' Main Function. '''
def main():
rospy.init_node('node_t5_qr_decode_service', anonymous=True)
rospy.sleep(6)
cam = Camera1()
# wait for process to finish
rospy.sleep(4)
packages = str(cam._pc)
print 'Packages Detected = '+ str(len(cam._pc))
rospy.loginfo_once(cam._pc)
s = rospy.Service('/2Dcamera_packages_type', camera_packages, cam.callback_service_on_request)
pkg = {'red':['R', 'HP', 'Medicine', '450'],
'green':['G', 'LP', 'Clothes', '150'],
'yellow':['Y', 'MP', 'Food', '250']}
count = 0
for i in cam._pc.keys():
count += 1
pkg_location = i[-2:]
colour = cam._pc[i]
pkg_sku = pkg[colour][0] + pkg_location + '0121'
pkg_item = pkg[colour][2]
pkg_priority = pkg[colour][1]
pkg_storage = 'R'+ pkg_location[0] +' C' + pkg_location[1]
pkg_cost = pkg[colour][3]
info = {'id':'Inventory',
'Team Id':'VB#693',
'Unique Id':'RRCneYRC',
'SKU':pkg_sku,
'Item':pkg_item,
'Priority':pkg_priority,
'Storage Number':pkg_storage,
'Cost':pkg_cost,
'Quantity':'1'}
message = str(info)
goal_handle = cam.send_goal_to_mqtt_client("spreadsheet", "pub",
cam._config_mqtt_pub_topic, message)
cam._goal_handles['Inventory:' + str(count)] = goal_handle
rospy.sleep(1)
rospy.loginfo("Goal Sent")
rospy.spin()
if __name__ == '__main__':
main()
|
py | b40e56999bb3dcb9ef356749daa7a5d3e4f95aa0 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['UserArgs', 'User']
@pulumi.input_type
class UserArgs:
def __init__(__self__, *,
access_string: pulumi.Input[str],
engine: pulumi.Input[str],
user_id: pulumi.Input[str],
user_name: pulumi.Input[str],
arn: Optional[pulumi.Input[str]] = None,
no_password_required: Optional[pulumi.Input[bool]] = None,
passwords: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a User resource.
:param pulumi.Input[str] access_string: Access permissions string used for this user. See [Specifying Permissions Using an Access String](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Clusters.RBAC.html#Access-string) for more details.
:param pulumi.Input[str] engine: The current supported value is `REDIS`.
:param pulumi.Input[str] user_id: The ID of the user.
:param pulumi.Input[str] user_name: The username of the user.
:param pulumi.Input[str] arn: The ARN of the created ElastiCache User.
:param pulumi.Input[bool] no_password_required: Indicates a password is not required for this user.
:param pulumi.Input[Sequence[pulumi.Input[str]]] passwords: Passwords used for this user. You can create up to two passwords for each user.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A list of tags to be added to this resource. A tag is a key-value pair.
"""
pulumi.set(__self__, "access_string", access_string)
pulumi.set(__self__, "engine", engine)
pulumi.set(__self__, "user_id", user_id)
pulumi.set(__self__, "user_name", user_name)
if arn is not None:
pulumi.set(__self__, "arn", arn)
if no_password_required is not None:
pulumi.set(__self__, "no_password_required", no_password_required)
if passwords is not None:
pulumi.set(__self__, "passwords", passwords)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="accessString")
def access_string(self) -> pulumi.Input[str]:
"""
Access permissions string used for this user. See [Specifying Permissions Using an Access String](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Clusters.RBAC.html#Access-string) for more details.
"""
return pulumi.get(self, "access_string")
@access_string.setter
def access_string(self, value: pulumi.Input[str]):
pulumi.set(self, "access_string", value)
@property
@pulumi.getter
def engine(self) -> pulumi.Input[str]:
"""
The current supported value is `REDIS`.
"""
return pulumi.get(self, "engine")
@engine.setter
def engine(self, value: pulumi.Input[str]):
pulumi.set(self, "engine", value)
@property
@pulumi.getter(name="userId")
def user_id(self) -> pulumi.Input[str]:
"""
The ID of the user.
"""
return pulumi.get(self, "user_id")
@user_id.setter
def user_id(self, value: pulumi.Input[str]):
pulumi.set(self, "user_id", value)
@property
@pulumi.getter(name="userName")
def user_name(self) -> pulumi.Input[str]:
"""
The username of the user.
"""
return pulumi.get(self, "user_name")
@user_name.setter
def user_name(self, value: pulumi.Input[str]):
pulumi.set(self, "user_name", value)
@property
@pulumi.getter
def arn(self) -> Optional[pulumi.Input[str]]:
"""
The ARN of the created ElastiCache User.
"""
return pulumi.get(self, "arn")
@arn.setter
def arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "arn", value)
@property
@pulumi.getter(name="noPasswordRequired")
def no_password_required(self) -> Optional[pulumi.Input[bool]]:
"""
Indicates a password is not required for this user.
"""
return pulumi.get(self, "no_password_required")
@no_password_required.setter
def no_password_required(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "no_password_required", value)
@property
@pulumi.getter
def passwords(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Passwords used for this user. You can create up to two passwords for each user.
"""
return pulumi.get(self, "passwords")
@passwords.setter
def passwords(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "passwords", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A list of tags to be added to this resource. A tag is a key-value pair.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@pulumi.input_type
class _UserState:
def __init__(__self__, *,
access_string: Optional[pulumi.Input[str]] = None,
arn: Optional[pulumi.Input[str]] = None,
engine: Optional[pulumi.Input[str]] = None,
no_password_required: Optional[pulumi.Input[bool]] = None,
passwords: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
user_id: Optional[pulumi.Input[str]] = None,
user_name: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering User resources.
:param pulumi.Input[str] access_string: Access permissions string used for this user. See [Specifying Permissions Using an Access String](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Clusters.RBAC.html#Access-string) for more details.
:param pulumi.Input[str] arn: The ARN of the created ElastiCache User.
:param pulumi.Input[str] engine: The current supported value is `REDIS`.
:param pulumi.Input[bool] no_password_required: Indicates a password is not required for this user.
:param pulumi.Input[Sequence[pulumi.Input[str]]] passwords: Passwords used for this user. You can create up to two passwords for each user.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A list of tags to be added to this resource. A tag is a key-value pair.
:param pulumi.Input[str] user_id: The ID of the user.
:param pulumi.Input[str] user_name: The username of the user.
"""
if access_string is not None:
pulumi.set(__self__, "access_string", access_string)
if arn is not None:
pulumi.set(__self__, "arn", arn)
if engine is not None:
pulumi.set(__self__, "engine", engine)
if no_password_required is not None:
pulumi.set(__self__, "no_password_required", no_password_required)
if passwords is not None:
pulumi.set(__self__, "passwords", passwords)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tags_all is not None:
pulumi.set(__self__, "tags_all", tags_all)
if user_id is not None:
pulumi.set(__self__, "user_id", user_id)
if user_name is not None:
pulumi.set(__self__, "user_name", user_name)
@property
@pulumi.getter(name="accessString")
def access_string(self) -> Optional[pulumi.Input[str]]:
"""
Access permissions string used for this user. See [Specifying Permissions Using an Access String](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Clusters.RBAC.html#Access-string) for more details.
"""
return pulumi.get(self, "access_string")
@access_string.setter
def access_string(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "access_string", value)
@property
@pulumi.getter
def arn(self) -> Optional[pulumi.Input[str]]:
"""
The ARN of the created ElastiCache User.
"""
return pulumi.get(self, "arn")
@arn.setter
def arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "arn", value)
@property
@pulumi.getter
def engine(self) -> Optional[pulumi.Input[str]]:
"""
The current supported value is `REDIS`.
"""
return pulumi.get(self, "engine")
@engine.setter
def engine(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "engine", value)
@property
@pulumi.getter(name="noPasswordRequired")
def no_password_required(self) -> Optional[pulumi.Input[bool]]:
"""
Indicates a password is not required for this user.
"""
return pulumi.get(self, "no_password_required")
@no_password_required.setter
def no_password_required(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "no_password_required", value)
@property
@pulumi.getter
def passwords(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Passwords used for this user. You can create up to two passwords for each user.
"""
return pulumi.get(self, "passwords")
@passwords.setter
def passwords(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "passwords", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A list of tags to be added to this resource. A tag is a key-value pair.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
return pulumi.get(self, "tags_all")
@tags_all.setter
def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags_all", value)
@property
@pulumi.getter(name="userId")
def user_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the user.
"""
return pulumi.get(self, "user_id")
@user_id.setter
def user_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user_id", value)
@property
@pulumi.getter(name="userName")
def user_name(self) -> Optional[pulumi.Input[str]]:
"""
The username of the user.
"""
return pulumi.get(self, "user_name")
@user_name.setter
def user_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user_name", value)
class User(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access_string: Optional[pulumi.Input[str]] = None,
arn: Optional[pulumi.Input[str]] = None,
engine: Optional[pulumi.Input[str]] = None,
no_password_required: Optional[pulumi.Input[bool]] = None,
passwords: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
user_id: Optional[pulumi.Input[str]] = None,
user_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
## Import
ElastiCache users can be imported using the `user_id`, e.g.
```sh
$ pulumi import aws:elasticache/user:User my_user userId1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] access_string: Access permissions string used for this user. See [Specifying Permissions Using an Access String](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Clusters.RBAC.html#Access-string) for more details.
:param pulumi.Input[str] arn: The ARN of the created ElastiCache User.
:param pulumi.Input[str] engine: The current supported value is `REDIS`.
:param pulumi.Input[bool] no_password_required: Indicates a password is not required for this user.
:param pulumi.Input[Sequence[pulumi.Input[str]]] passwords: Passwords used for this user. You can create up to two passwords for each user.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A list of tags to be added to this resource. A tag is a key-value pair.
:param pulumi.Input[str] user_id: The ID of the user.
:param pulumi.Input[str] user_name: The username of the user.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: UserArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
## Import
ElastiCache users can be imported using the `user_id`, e.g.
```sh
$ pulumi import aws:elasticache/user:User my_user userId1
```
:param str resource_name: The name of the resource.
:param UserArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(UserArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access_string: Optional[pulumi.Input[str]] = None,
arn: Optional[pulumi.Input[str]] = None,
engine: Optional[pulumi.Input[str]] = None,
no_password_required: Optional[pulumi.Input[bool]] = None,
passwords: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
user_id: Optional[pulumi.Input[str]] = None,
user_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = UserArgs.__new__(UserArgs)
if access_string is None and not opts.urn:
raise TypeError("Missing required property 'access_string'")
__props__.__dict__["access_string"] = access_string
__props__.__dict__["arn"] = arn
if engine is None and not opts.urn:
raise TypeError("Missing required property 'engine'")
__props__.__dict__["engine"] = engine
__props__.__dict__["no_password_required"] = no_password_required
__props__.__dict__["passwords"] = passwords
__props__.__dict__["tags"] = tags
if user_id is None and not opts.urn:
raise TypeError("Missing required property 'user_id'")
__props__.__dict__["user_id"] = user_id
if user_name is None and not opts.urn:
raise TypeError("Missing required property 'user_name'")
__props__.__dict__["user_name"] = user_name
__props__.__dict__["tags_all"] = None
super(User, __self__).__init__(
'aws:elasticache/user:User',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
access_string: Optional[pulumi.Input[str]] = None,
arn: Optional[pulumi.Input[str]] = None,
engine: Optional[pulumi.Input[str]] = None,
no_password_required: Optional[pulumi.Input[bool]] = None,
passwords: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
user_id: Optional[pulumi.Input[str]] = None,
user_name: Optional[pulumi.Input[str]] = None) -> 'User':
"""
Get an existing User resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] access_string: Access permissions string used for this user. See [Specifying Permissions Using an Access String](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Clusters.RBAC.html#Access-string) for more details.
:param pulumi.Input[str] arn: The ARN of the created ElastiCache User.
:param pulumi.Input[str] engine: The current supported value is `REDIS`.
:param pulumi.Input[bool] no_password_required: Indicates a password is not required for this user.
:param pulumi.Input[Sequence[pulumi.Input[str]]] passwords: Passwords used for this user. You can create up to two passwords for each user.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A list of tags to be added to this resource. A tag is a key-value pair.
:param pulumi.Input[str] user_id: The ID of the user.
:param pulumi.Input[str] user_name: The username of the user.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _UserState.__new__(_UserState)
__props__.__dict__["access_string"] = access_string
__props__.__dict__["arn"] = arn
__props__.__dict__["engine"] = engine
__props__.__dict__["no_password_required"] = no_password_required
__props__.__dict__["passwords"] = passwords
__props__.__dict__["tags"] = tags
__props__.__dict__["tags_all"] = tags_all
__props__.__dict__["user_id"] = user_id
__props__.__dict__["user_name"] = user_name
return User(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="accessString")
def access_string(self) -> pulumi.Output[str]:
"""
Access permissions string used for this user. See [Specifying Permissions Using an Access String](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Clusters.RBAC.html#Access-string) for more details.
"""
return pulumi.get(self, "access_string")
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
The ARN of the created ElastiCache User.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter
def engine(self) -> pulumi.Output[str]:
"""
The current supported value is `REDIS`.
"""
return pulumi.get(self, "engine")
@property
@pulumi.getter(name="noPasswordRequired")
def no_password_required(self) -> pulumi.Output[Optional[bool]]:
"""
Indicates a password is not required for this user.
"""
return pulumi.get(self, "no_password_required")
@property
@pulumi.getter
def passwords(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Passwords used for this user. You can create up to two passwords for each user.
"""
return pulumi.get(self, "passwords")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A list of tags to be added to this resource. A tag is a key-value pair.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> pulumi.Output[Mapping[str, str]]:
return pulumi.get(self, "tags_all")
@property
@pulumi.getter(name="userId")
def user_id(self) -> pulumi.Output[str]:
"""
The ID of the user.
"""
return pulumi.get(self, "user_id")
@property
@pulumi.getter(name="userName")
def user_name(self) -> pulumi.Output[str]:
"""
The username of the user.
"""
return pulumi.get(self, "user_name")
|
py | b40e57fceee26cfe840bc362bf2decedf56528c0 | '''DISTRIBUTION STATEMENT A. Approved for public release: distribution unlimited.
This material is based upon work supported by the Assistant Secretary of Defense for
Research and Engineering under Air Force Contract No. FA8721-05-C-0002 and/or
FA8702-15-D-0001. Any opinions, findings, conclusions or recommendations expressed in this
material are those of the author(s) and do not necessarily reflect the views of the
Assistant Secretary of Defense for Research and Engineering.
Copyright 2015 Massachusetts Institute of Technology.
The software/firmware is provided to you on an As-Is basis
Delivered to the US Government with Unlimited Rights, as defined in DFARS Part
252.227-7013 or 7014 (Feb 2014). Notwithstanding any copyright notice, U.S. Government
rights in this work are defined by DFARS 252.227-7013 or DFARS 252.227-7014 as detailed
above. Use of this work other than as specifically authorized by the U.S. Government may
violate any copyrights that exist in this work.
'''
import base64
import binascii
import configparser
import distutils.spawn
import hashlib
import os
import re
import sys
import tempfile
import threading
import time
import zlib
import yaml
import codecs
from distutils.version import LooseVersion, StrictVersion
try:
from yaml import CSafeLoader as SafeLoader, CSafeDumper as SafeDumper
except ImportError:
from yaml import SafeLoader as SafeLoader, SafeDumper as SafeDumper
try:
import simplejson as json
except ImportError:
raise("Simplejson is mandatory, please install")
import M2Crypto
from M2Crypto import m2
from keylime import cmd_exec
from keylime import common
from keylime import keylime_logging
from keylime import secure_mount
from keylime import tpm_abstract
from keylime import tpm_ek_ca
logger = keylime_logging.init_logging('tpm2')
# Read the config file
config = configparser.RawConfigParser()
config.read(common.CONFIG_FILE)
class tpm2(tpm_abstract.AbstractTPM):
def __init__(self, need_hw_tpm=False):
tpm_abstract.AbstractTPM.__init__(self, need_hw_tpm)
# Shared lock to serialize access to tools
self.tpmutilLock = threading.Lock()
self.__get_tpm2_tools()
# We don't know which algs the TPM supports yet
self.supported['encrypt'] = set()
self.supported['hash'] = set()
self.supported['sign'] = set()
# Grab which default algs the config requested
defaultHash = config.get('cloud_agent', "tpm_hash_alg")
defaultEncrypt = config.get('cloud_agent', "tpm_encryption_alg")
defaultSign = config.get('cloud_agent', "tpm_signing_alg")
if self.need_hw_tpm:
# Start up the TPM
self.__startup_tpm()
# Figure out which algorithms the TPM supports
self.__get_tpm_algorithms()
# Ensure TPM supports the defaults requested
if defaultHash not in self.supported['hash']:
raise Exception('Unsupported hash algorithm specified: %s!'%(defaultHash))
if defaultEncrypt not in self.supported['encrypt']:
raise Exception('Unsupported encryption algorithm specified: %s!'%(defaultEncrypt))
if defaultSign not in self.supported['sign']:
raise Exception('Unsupported signing algorithm specified: %s!'%(defaultSign))
else:
# Assume their defaults are sane?
pass
self.defaults['hash'] = defaultHash
self.defaults['encrypt'] = defaultEncrypt
self.defaults['sign'] = defaultSign
def get_tpm_version(self):
return 2
def __get_tpm2_tools(self):
global tools_version
retDict = self.__run("tpm2_startup --version")
code = retDict['code']
output = ''.join(common.list_convert(retDict['retout']))
errout = ''.join(common.list_convert(retDict['reterr']))
if code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
raise Exception("Error establishing tpm2-tools version using TPM2_Startup: %s"+str(code)+": "+str(errout))
# Extract the `version="x.x.x"` from tools
version_str = re.search(r'version="([^"]+)"', output).group(1)
# Extract the full semver release number.
tools_version = version_str.split("-")
if StrictVersion(tools_version[0]) >= StrictVersion("4.0.0"):
logger.info(f"TPM2-TOOLS Version: {tools_version[0]}")
tools_version = "4.0"
elif StrictVersion(tools_version[0]) >= StrictVersion("3.2.0"):
logger.info(f"TPM2-TOOLS Version: {tools_version[0]}")
tools_version = "3.2"
else:
logger.error(f"TPM2-TOOLS Version {tools_version[0]} is not supported.")
exit()
def __get_tpm_algorithms(self):
vendorStr = None
if tools_version == "3.2":
retDict = self.__run("tpm2_getcap -c algorithms")
elif tools_version == "4.0":
retDict = self.__run("tpm2_getcap algorithms")
output = common.list_convert(retDict['retout'])
errout = common.list_convert(retDict['reterr'])
code = retDict['code']
if code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
raise Exception("get_tpm_algorithms failed with code "+str(code)+": "+str(errout))
if tools_version == "3.2":
# output, human-readable -> json
output = "".join(output)
output = re.sub(r'TPMA_ALGORITHM for ALG_ID: 0x[0-9a-f]+\s+-\s+([a-z0-9_]+)', r'\1:', output)
output = output.replace("set", "1")
output = output.replace("clear", "0")
output = [output]
retyaml = common.yaml_to_dict(output)
for algorithm,details in retyaml.items():
if details["asymmetric"] == 1 and details["object"] == 1 and tpm_abstract.Encrypt_Algorithms.is_recognized(algorithm):
self.supported['encrypt'].add(algorithm)
elif details["hash"] == 1 and tpm_abstract.Hash_Algorithms.is_recognized(algorithm):
self.supported['hash'].add(algorithm)
elif details["asymmetric"] == 1 and details["signing"] == 1 and tpm_abstract.Sign_Algorithms.is_recognized(algorithm):
self.supported['sign'].add(algorithm)
#tpm_exec
@staticmethod
def __fingerprint(cmd):
# Creates a unique-enough ID from the given command
fprt = cmd.split()[0]
if fprt == 'tpm2_nvread':
if '0x1c00002' in cmd: # read_ekcert_nvram
fprt += '-ekcert'
else: # read_key_nvram
fprt += '-key'
elif fprt == "tpm2_getcap":
if 'handles-persistent' in cmd:
fprt += '-handles'
elif 'properties-fixed' in cmd:
fprt += '-props'
else:
# other commands are already unique
pass
return fprt
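    # Hedged examples (added for clarity), following the rules above:
    #   "tpm2_nvread ... 0x1c00002 ..."   -> "tpm2_nvread-ekcert"
    #   "tpm2_getcap handles-persistent"  -> "tpm2_getcap-handles"
    #   "tpm2_startup -c"                 -> "tpm2_startup"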
def __run(self, cmd, expectedcode=tpm_abstract.AbstractTPM.EXIT_SUCESS, raiseOnError=True, lock=True, outputpaths=None):
env = os.environ.copy()
lib_path = ""
if 'LD_LIBRARY_PATH' in env:
lib_path = env['LD_LIBRARY_PATH']
if 'TPM2TOOLS_TCTI' not in env:
# Don't clobber existing setting (if present)
env['TPM2TOOLS_TCTI'] = 'tabrmd:bus_name=com.intel.tss2.Tabrmd'
# Other (not recommended) options are direct emulator and chardev communications:
#env['TPM2TOOLS_TCTI'] = 'mssim:port=2321'
#env['TPM2TOOLS_TCTI'] = 'device:/dev/tpm0'
env['PATH'] = env['PATH']+":%s"%common.TPM_TOOLS_PATH
env['LD_LIBRARY_PATH'] = lib_path+":%s"%common.TPM_LIBS_PATH
# Convert single outputpath to list
if isinstance(outputpaths, str):
outputpaths = [outputpaths]
# Handle stubbing the TPM out
fprt = tpm2.__fingerprint(cmd)
if common.STUB_TPM and common.TPM_CANNED_VALUES is not None:
# Use canned values for stubbing
jsonIn = common.TPM_CANNED_VALUES
if fprt in jsonIn:
# The value we're looking for has been canned!
thisTiming = jsonIn[fprt]['timing']
thisRetout = jsonIn[fprt]['retout']
thisCode = jsonIn[fprt]['code']
thisFileout = jsonIn[fprt]['fileout']
fileoutEncoded = {}
# Decode files that are supplied (and requested)
if outputpaths is not None and len(outputpaths) > 0:
if len(thisFileout) == 1 and len(outputpaths) == 1:
#fileoutEncoded[outputpaths[0]] = base64.b64decode(next(iter(thisFileout.values()))).decode("zlib")
fileoutEncoded[outputpaths[0]] = zlib.decompress(base64.b64decode(next(iter(thisFileout.values()))))
elif fprt == "tpm2_deluxequote":
# quotes need 3 outputs, so we need a consistent way to match them back up when reading
quote_msg = ""
match = re.search("-m ([^\s]+)", cmd)
if match:
quote_msg = match.group(1)
if "file://quoteMessage" in thisFileout:
#fileoutEncoded[quote_msg] = base64.b64decode(thisFileout["file://quoteMessage"]).decode("zlib")
fileoutEncoded[quote_msg] = zlib.decompress(base64.b64decode(thisFileout["file://quoteMessage"]))
quote_sig = ""
match = re.search("-s ([^\s]+)", cmd)
if match:
quote_sig = match.group(1)
if "file://quoteSignature" in thisFileout:
#fileoutEncoded[quote_sig] = base64.b64decode(thisFileout["file://quoteSignature"]).decode("zlib")
fileoutEncoded[quote_sig] = zlib.decompress(base64.b64decode(thisFileout["file://quoteSignature"]))
quote_pcr = ""
match = re.search("-p ([^\s]+)", cmd)
if match:
quote_pcr = match.group(1)
if "file://quotePCR" in thisFileout:
#fileoutEncoded[quote_pcr] = base64.b64decode(thisFileout["file://quotePCR"]).decode("zlib")
fileoutEncoded[quote_pcr] = zlib.decompress(base64.b64decode(thisFileout["file://quotePCR"]))
else:
raise Exception("Command %s is using multiple files unexpectedly!"%(fprt))
logger.debug("TPM call '%s' was stubbed out, with a simulated delay of %f sec"%(fprt, thisTiming))
time.sleep(thisTiming)
# Package for return
returnDict = {
'retout': thisRetout,
'reterr': [],
'code': thisCode,
'fileouts': fileoutEncoded,
'timing': thisTiming,
}
return returnDict
elif not lock:
# non-lock calls don't go to the TPM (just let it pass through)
pass
else:
# Our command hasn't been canned!
raise Exception("Command %s not found in canned YAML!"%(fprt))
numtries = 0
while True:
if lock:
with self.tpmutilLock:
retDict = cmd_exec.run(cmd=cmd, expectedcode=expectedcode, raiseOnError=False, lock=lock, outputpaths=outputpaths, env=env)
else:
retDict = cmd_exec.run(cmd=cmd, expectedcode=expectedcode, raiseOnError=False, lock=lock, outputpaths=outputpaths, env=env)
t0 = retDict['timing']['t0']
t1 = retDict['timing']['t1']
code = retDict['code']
retout = retDict['retout']
reterr = retDict['reterr']
fileouts = retDict['fileouts']
# keep trying to get quote if a PCR race condition occurred in deluxe quote
if fprt == "tpm2_quote" and "Error validating calculated PCR composite with quote" in reterr:
numtries += 1
maxr = self.config.getint('cloud_agent', 'max_retries')
if numtries >= maxr:
logger.error("Agent did not return proper quote due to PCR race condition.")
break
retry = self.config.getfloat('cloud_agent', 'retry_interval')
logger.info("Failed to get quote %d/%d times, trying again in %f seconds..."%(numtries, maxr, retry))
time.sleep(retry)
continue
else:
break
# Don't bother continuing if TPM call failed and we're raising on error
if code != expectedcode and raiseOnError:
raise Exception("Command: %s returned %d, expected %d, output %s, stderr %s"%(cmd, code, expectedcode, retout, reterr))
# Metric output
if lock or self.tpmutilLock.locked():
pad = ""
if len(fprt) < 8:
pad += "\t"
if len(fprt) < 16:
pad += "\t"
if len(fprt) < 24:
pad += "\t"
filelen = 0
if fileouts is not None:
filelen = len(fileouts)
# Print out benchmarking information for TPM (if requested)
#print "\033[95mTIMING: %s%s\t:%f\toutlines:%d\tfilelines:%d\t%s\033[0m" % (fprt, pad, t1-t0, len(retout), filelen, cmd)
if common.TPM_BENCHMARK_PATH is not None:
                with open(common.TPM_BENCHMARK_PATH, "a") as f:
f.write("TIMING: %s%s\t:%f\toutlines:%d\tfilelines:%d\t%s\n" % (fprt, pad, t1-t0, len(retout), filelen, cmd))
# Print out YAML canned values (if requested)
# NOTE: resulting file will be missing the surrounding braces! (must add '{' and '}' for reading)
if common.TPM_CANNED_VALUES_PATH is not None:
                with open(common.TPM_CANNED_VALUES_PATH, "a") as can:
fileoutEncoded = {}
# Process files
if outputpaths is not None and len(outputpaths) > 0:
if len(fileouts) == 1 and len(outputpaths) == 1:
#fileoutEncoded[outputpaths[0]] = base64.b64encode(iter(fileouts.values()).next().encode("zlib"))
                            fileoutEncoded[outputpaths[0]] = base64.b64encode(zlib.compress(next(iter(fileouts.values()))))
elif fprt == "tpm2_deluxequote":
# quotes need 3 outputs, so we need a consistent way to match them back up when reading
quote_msg = ""
match = re.search("-m ([^\s]+)", cmd)
if match:
quote_msg = match.group(1)
if quote_msg in fileouts:
# fileoutEncoded["file://quoteMessage"] = base64.b64encode(fileouts[quote_msg].encode("zlib"))
fileoutEncoded["file://quoteMessage"] = zlib.compress(base64.b64decode(fileouts[quote_msg]))
quote_sig = ""
match = re.search("-s ([^\s]+)", cmd)
if match:
quote_sig = match.group(1)
if quote_sig in fileouts:
# fileoutEncoded["file://quoteSignature"] = base64.b64encode(fileouts[quote_sig].encode("zlib"))
fileoutEncoded["file://quoteSignature"] = zlib.compress(base64.b64decode(fileouts[quote_sig]))
quote_pcr = ""
match = re.search("-p ([^\s]+)", cmd)
if match:
quote_pcr = match.group(1)
if quote_pcr in fileouts:
# fileoutEncoded["file://quotePCR"] = base64.b64encode(fileouts[quote_pcr].encode("zlib"))
fileoutEncoded["file://quotePCR"] = zlib.compress(base64.b64decode(fileouts[quote_pcr]))
else:
raise Exception("Command %s is using multiple files unexpectedly!"%(fprt))
# tpm_cexec will need to know the nonce
nonce = ""
match = re.search("-q ([\w]+)", cmd)
if match:
nonce = binascii.a2b_hex(match.group(1))
jsonObj = {'type':fprt, 'retout':retout, 'fileout':fileoutEncoded, 'cmd':cmd, 'timing':t1-t0, 'code':code, 'nonce':nonce}
can.write("\"%s\": %s,\n"%(fprt, json.dumps(jsonObj, indent=4, sort_keys=True)))
return retDict
#tpm_initialize
def __startup_tpm(self):
retDict = self.__run("tpm2_startup -c")
output = common.list_convert(retDict['retout'])
errout = common.list_convert(retDict['reterr'])
code = retDict['code']
if code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
raise Exception("Error initializing emulated TPM with TPM2_Startup: %s"+str(code)+": "+str(errout))
def __create_ek(self, asym_alg=None):
# this function is intended to be idempotent
if asym_alg is None:
asym_alg = self.defaults['encrypt']
current_handle = self.get_tpm_metadata("ek_handle")
owner_pw = self.get_tpm_metadata("owner_pw")
# clear out old handle before starting again (give idempotence)
if current_handle is not None and owner_pw is not None:
logger.info("Flushing old ek handle: %s"%hex(current_handle))
if tools_version == "3.2":
retDict = self.__run("tpm2_getcap -c handles-persistent", raiseOnError=False)
elif tools_version == "4.0":
retDict = self.__run("tpm2_getcap handles-persistent", raiseOnError=False)
output = retDict['retout']
reterr = retDict['reterr']
code = retDict['code']
if code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
raise Exception("tpm2_getcap failed with code "+str(code)+": "+str(reterr))
outjson = common.yaml_to_dict(output)
if outjson is not None and current_handle in outjson:
if tools_version == "3.2":
retDict = self.__run("tpm2_evictcontrol -A o -c %s -P %s"%(hex(current_handle), owner_pw), raiseOnError=False)
else:
retDict = self.__run("tpm2_evictcontrol -C o -c %s -P %s"%(hex(current_handle), owner_pw), raiseOnError=False)
output = retDict['retout']
reterr = retDict['reterr']
code = retDict['code']
if code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
logger.info("Failed to flush old ek handle: %s. Code %s"%(hex(current_handle), str(code)+": "+str(reterr)))
self._set_tpm_metadata('ek_handle', None)
self._set_tpm_metadata('ek_pw', None)
# make sure an ownership pw is set
if owner_pw is None:
owner_pw = tpm_abstract.TPM_Utilities.random_password(20)
self._set_tpm_metadata('owner_pw', owner_pw)
ek_pw = tpm_abstract.TPM_Utilities.random_password(20)
# create a new ek
with tempfile.NamedTemporaryFile() as tmppath:
cmdargs = {
'asymalg': asym_alg,
'ekpubfile': tmppath.name,
'ekpw': ek_pw,
'opw': owner_pw,
'epw': owner_pw
}
if tools_version == "3.2":
command = "tpm2_getpubek -H 0x81010007 -g {asymalg} -f {ekpubfile} -P {ekpw} -o {opw} -e {epw}".format(**cmdargs)
elif tools_version == "4.0":
command = "tpm2_createek -c - -G {asymalg} -u {ekpubfile} -p {ekpw} -w {opw} -P {epw}".format(**cmdargs)
retDict = self.__run(command, raiseOnError=False, outputpaths=tmppath.name)
output = retDict['retout']
reterr = retDict['reterr']
code = retDict['code']
ek_tpm = retDict['fileouts'][tmppath.name]
if code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
raise Exception("createek failed with code "+str(code)+": "+str(reterr))
if tools_version == "3.2":
handle = int(0x81010007)
else:
handle = None
retyaml = common.yaml_to_dict(output)
if "persistent-handle" in retyaml:
handle = retyaml["persistent-handle"]
self._set_tpm_metadata('ek_handle', handle)
self._set_tpm_metadata('ek_pw', ek_pw)
self._set_tpm_metadata('ek_tpm', base64.b64encode(ek_tpm))
return
def __take_ownership(self, config_pw):
# if no ownerpassword
if config_pw == 'generate':
logger.info("Generating random TPM owner password")
owner_pw = tpm_abstract.TPM_Utilities.random_password(20)
else:
logger.info("Taking ownership with config provided TPM owner password: %s"%config_pw)
owner_pw = config_pw
if tools_version == "3.2":
retDict = self.__run("tpm2_takeownership -c", raiseOnError=False)
retDict = self.__run("tpm2_takeownership -o %s -e %s"%(owner_pw, owner_pw), raiseOnError=False)
elif tools_version == "4.0":
retDict = self.__run("tpm2_changeauth -c o %s"%(owner_pw), raiseOnError=False)
retDict = self.__run("tpm2_changeauth -c e %s"%(owner_pw), raiseOnError=False)
output = retDict['retout']
code = retDict['code']
if code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
# if we fail, see if already owned with this pw
if tools_version == "3.2":
retDict = self.__run("tpm2_takeownership -o %s -e %s -O %s -E %s"%(owner_pw, owner_pw, owner_pw, owner_pw), raiseOnError=False)
elif tools_version == "4.0":
retDict = self.__run("tpm2_changeauth -c o -p %s %s"%(owner_pw, owner_pw), raiseOnError=False)
retDict = self.__run("tpm2_changeauth -c e -p %s %s"%(owner_pw, owner_pw), raiseOnError=False)
output = retDict['retout']
reterr = retDict['reterr']
code = retDict['code']
if code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
                # uh-oh, already owned but not with the provided password!
                raise Exception("Owner password unknown, TPM reset required. Code "+str(code)+": "+str(reterr))
self._set_tpm_metadata('owner_pw', owner_pw)
logger.info("TPM Owner password confirmed: %s"%owner_pw)
def __get_pub_ek(self): # assumes that owner_pw is correct at this point
handle = self.get_tpm_metadata('ek_handle')
if handle is None:
raise Exception("create_ek has not been run yet?")
#make a temp file for the output
with tempfile.NamedTemporaryFile() as tmppath:
# generates pubek.pem
if tools_version == "3.2":
retDict = self.__run("tpm2_readpublic -H %s -o %s -f pem"%(hex(handle), tmppath.name), raiseOnError=False, outputpaths=tmppath.name)
else:
retDict = self.__run("tpm2_readpublic -c %s -o %s -f pem"%(hex(handle), tmppath.name), raiseOnError=False, outputpaths=tmppath.name)
output = retDict['retout']
reterr = retDict['reterr']
code = retDict['code']
ek = retDict['fileouts'][tmppath.name]
if code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
raise Exception("tpm2_readpublic failed with code "+str(code)+": "+str(reterr))
self._set_tpm_metadata('ek', ek)
def __get_pub_aik(self):
"""Retrieves the PEM version of the public AIK.
        Helper function for '__create_aik', required for legacy tpm2-tools (v3.2),
        since tpm2_getpubak does not support outputting the public AIK in the
        required PEM format. Note that 'aik_handle' metadata must
have been set before running this function. Function sets the
'aik' metadata.
"""
if not tools_version == "3.2":
logger.error("The get_pub_aik method does not apply to modern tpm2-tools!")
return
handle = self.get_tpm_metadata('aik_handle')
if handle is None:
raise Exception("tpm2_getpubak has not been run yet?")
#make a temp file for the output
with tempfile.NamedTemporaryFile() as akpubfile:
# generates pubak.pem
retDict = self.__run("tpm2_readpublic -H %s -o %s -f pem"%(hex(handle), akpubfile.name), raiseOnError=False, outputpaths=akpubfile.name)
output = retDict['retout']
reterr = retDict['reterr']
code = retDict['code']
pem = retDict['fileouts'][akpubfile.name]
if code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
raise Exception("tpm2_readpublic failed with code "+str(code)+": "+str(reterr))
if pem == "":
raise Exception("unable to read public aik from create identity. Is your tpm2-tools installation up to date?")
self._set_tpm_metadata('aik', pem)
def __create_aik(self, activate, asym_alg=None, hash_alg=None, sign_alg=None):
if hash_alg is None:
hash_alg = self.defaults['hash']
if asym_alg is None:
asym_alg = self.defaults['encrypt']
if sign_alg is None:
sign_alg = self.defaults['sign']
owner_pw = self.get_tpm_metadata('owner_pw')
# clear out old handle before starting again (give idempotence)
if self.get_tpm_metadata('aik') is not None and self.get_tpm_metadata('aik_name') is not None:
aik_handle = self.get_tpm_metadata('aik_handle')
if tools_version == "3.2":
logger.info("Flushing old ak handle: %s"%hex(aik_handle))
retDict = self.__run("tpm2_getcap -c handles-persistent", raiseOnError=False)
elif tools_version == "4.0":
logger.info("Flushing old ak handle: %s"%aik_handle)
retDict = self.__run("tpm2_getcap handles-persistent", raiseOnError=False)
output = common.list_convert(retDict['retout'])
errout = common.list_convert(retDict['reterr'])
code = retDict['code']
if code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
raise Exception("tpm2_getcap failed with code "+str(code)+": "+str(errout))
if tools_version == "3.2":
# output, human-readable -> json
output = "".join(output)
output = output.replace("0x", " - 0x")
output = [output]
outjson = common.yaml_to_dict(output)
if outjson is not None and aik_handle in outjson:
if tools_version == "3.2":
retDict = self.__run("tpm2_evictcontrol -A o -c %s -P %s"%(hex(aik_handle), owner_pw), raiseOnError=False)
else:
retDict = self.__run("tpm2_evictcontrol -C o -c %s -P %s"%(aik_handle, owner_pw), raiseOnError=False)
output = retDict['retout']
reterr = retDict['reterr']
code = retDict['code']
if code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
if tools_version == "3.2":
logger.info("Failed to flush old ak handle: %s. Code %s"%(hex(aik_handle), str(code)+": "+str(reterr)))
elif tools_version == "4.0":
logger.info("Failed to flush old ak handle: %s. Code %s"%(aik_handle, str(code)+": "+str(reterr)))
self._set_tpm_metadata('aik', None)
self._set_tpm_metadata('aik_name', None)
self._set_tpm_metadata('aik_pw', None)
self._set_tpm_metadata('aik_handle', None)
logger.debug("Creating a new AIK identity")
# We need an ek handle to make an aik
ek_handle = self.get_tpm_metadata("ek_handle")
if ek_handle is None:
raise Exception("Failed to create AIK, since EK has not yet been created!")
aik_pw = tpm_abstract.TPM_Utilities.random_password(20)
#make a temp file for the output
with tempfile.NamedTemporaryFile() as akpubfile:
secpath = ""
if tools_version == "4.0":
# ok lets write out the key now
secdir = secure_mount.mount() # confirm that storage is still securely mounted
secfd, secpath = tempfile.mkstemp(dir=secdir)
cmdargs = {
'ekhandle': hex(ek_handle),
'aksession': secpath,
'akpubfile': akpubfile.name,
'asymalg': asym_alg,
'hashalg': hash_alg,
'signalg': sign_alg,
'epw': owner_pw,
'opw': owner_pw,
'apw': aik_pw
}
if tools_version == "3.2":
command = "tpm2_getpubak -E {ekhandle} -k 0x81010008 -g {asymalg} -D {hashalg} -s {signalg} -f {akpubfile} -e {epw} -P {apw} -o {opw}".format(**cmdargs)
elif tools_version == "4.0":
command = "tpm2_createak -C {ekhandle} -c {aksession} -G {asymalg} -g {hashalg} -s {signalg} -u {akpubfile} -f pem -p {apw} -P {epw}".format(**cmdargs)
retDict = self.__run(command, outputpaths=akpubfile.name)
retout = retDict['retout']
reterr = retDict['reterr']
code = retDict['code']
if code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
raise Exception("tpm2_createak failed with code "+str(code)+": "+str(reterr))
            jsonout = common.yaml_to_dict(retout)
            if jsonout is None or 'loaded-key' not in jsonout or 'name' not in jsonout['loaded-key']:
                raise Exception("tpm2_createak failed to create aik: return "+str(reterr))
            akname = jsonout['loaded-key']['name']
if tools_version == "3.2":
if 'loaded-key' not in jsonout or 'name' not in jsonout['loaded-key']:
raise Exception("tpm2_createak failed to create aik: return "+str(reterr))
handle = int(0x81010008)
# get and persist the pem (not returned by tpm2_getpubak)
self._set_tpm_metadata('aik_handle', handle)
self.__get_pub_aik()
else:
if 'loaded-key' not in jsonout:
raise Exception("tpm2_createak failed to create aik: return "+str(reterr))
handle = secpath
pem = retDict['fileouts'][akpubfile.name]
if pem == "":
raise Exception("unable to read public aik from create identity. Is your tpm2-tools installation up to date?")
# persist the pem
self._set_tpm_metadata('aik_handle', handle)
self._set_tpm_metadata('aik', pem)
# persist common results
self._set_tpm_metadata('aik_name', akname)
self._set_tpm_metadata('aik_pw', aik_pw)
def flush_keys(self):
logger.debug("Flushing keys from TPM...")
if tools_version == "3.2":
retDict = self.__run("tpm2_getcap -c handles-persistent")
elif tools_version == "4.0":
retDict = self.__run("tpm2_getcap handles-persistent")
# retout = retDict['retout']
retout = common.list_convert(retDict['retout'])
errout = common.list_convert(retDict['reterr'])
code = retDict['code']
if code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
logger.debug("tpm2_getcap failed with code "+str(code)+": "+str(errout))
if tools_version == "3.2":
# output, human-readable -> json
retout = "".join(retout)
retout = retout.replace("0x", " - 0x")
retout = [retout]
owner_pw = self.get_tpm_metadata("owner_pw")
jsonout = common.yaml_to_dict(retout)
for key in jsonout:
logger.debug("Flushing key handle %s"%hex(key))
if tools_version == "3.2":
self.__run("tpm2_evictcontrol -A o -c %s -P %s"%(hex(key), owner_pw), raiseOnError=False)
else:
self.__run("tpm2_evictcontrol -C o -c %s -P %s"%(hex(key), owner_pw), raiseOnError=False)
def encryptAIK(self, uuid, pubaik, pubek, ek_tpm, aik_name):
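        # Protocol sketch: generate a random 32-byte challenge and seal it with
        # tpm2_makecredential against the caller-supplied EK public (ek_tpm) and AIK
        # name, so that only a TPM holding that EK and the named AIK can recover the
        # challenge via tpm2_activatecredential (see activate_identity below).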
pubaikFile = None
pubekFile = None
challengeFile = None
keyblob = None
blobpath = None
if ek_tpm is None or aik_name is None:
logger.error("Missing parameters for encryptAIK")
return None
try:
# write out the public EK
efd, etemp = tempfile.mkstemp()
pubekFile = open(etemp, "wb")
pubekFile.write(base64.b64decode(ek_tpm))
pubekFile.close()
os.close(efd)
# write out the challenge
challenge = tpm_abstract.TPM_Utilities.random_password(32)
challenge = challenge.encode()
keyfd, keypath = tempfile.mkstemp()
challengeFile = open(keypath, "wb")
challengeFile.write(challenge)
challengeFile.close()
os.close(keyfd)
# create temp file for the blob
blobfd, blobpath = tempfile.mkstemp()
cmdargs = {
'akname': aik_name,
'ekpub': pubekFile.name,
'blobout': blobpath,
'challenge': challengeFile.name
}
if tools_version == "3.2":
command = "tpm2_makecredential -T none -e {ekpub} -s {challenge} -n {akname} -o {blobout}".format(**cmdargs)
else:
command = "tpm2_makecredential -T none -e {ekpub} -s {challenge} -n {akname} -o {blobout}".format(**cmdargs)
self.__run(command, lock=False)
logger.info("Encrypting AIK for UUID %s"%uuid)
# read in the blob
f = open(blobpath, "rb")
keyblob = base64.b64encode(f.read())
f.close()
os.close(blobfd)
# read in the aes key
key = base64.b64encode(challenge)
except Exception as e:
logger.error("Error encrypting AIK: "+str(e))
logger.exception(e)
return None
finally:
if pubekFile is not None:
os.remove(pubekFile.name)
if challengeFile is not None:
os.remove(challengeFile.name)
if blobpath is not None:
os.remove(blobpath)
return (keyblob, key)
def activate_identity(self, keyblob):
owner_pw = self.get_tpm_metadata('owner_pw')
aik_keyhandle = self.get_tpm_metadata('aik_handle')
ek_keyhandle = self.get_tpm_metadata('ek_handle')
keyblobFile = None
secpath = None
sesspath = None
try:
# write out key blob
kfd, ktemp = tempfile.mkstemp()
keyblobFile = open(ktemp, "wb")
            # write the decoded key blob out to the temp file
keyblobFile.write(base64.b64decode(keyblob))
keyblobFile.close()
os.close(kfd)
# ok lets write out the key now
secdir = secure_mount.mount() # confirm that storage is still securely mounted
secfd, secpath = tempfile.mkstemp(dir=secdir)
sessfd, sesspath = tempfile.mkstemp(dir=secdir)
if tools_version == "3.2":
cmdargs = {
'akhandle': hex(aik_keyhandle),
'ekhandle': hex(ek_keyhandle),
'keyblobfile': keyblobFile.name,
'credfile': secpath,
'apw': self.get_tpm_metadata('aik_pw'),
'epw': owner_pw
}
command = "tpm2_activatecredential -H {akhandle} -k {ekhandle} -f {keyblobfile} -o {credfile} -P {apw} -e {epw}".format(**cmdargs)
retDict = self.__run(command, outputpaths=secpath)
else:
cmdargs = {
'akhandle': aik_keyhandle,
'ekhandle': hex(ek_keyhandle),
'keyblobfile': keyblobFile.name,
'sessfile': sesspath,
'credfile': secpath,
'apw': self.get_tpm_metadata('aik_pw'),
'epw': owner_pw
}
self.__run("tpm2_startauthsession --policy-session -S {sessfile}".format(**cmdargs))
self.__run("tpm2_policysecret -S {sessfile} -c 0x4000000B {epw}".format(**cmdargs))
command = "tpm2_activatecredential -c {akhandle} -C {ekhandle} -i {keyblobfile} -o {credfile} -p {apw} -P \"session:{sessfile}\"".format(**cmdargs)
retDict = self.__run(command, outputpaths=secpath)
self.__run("tpm2_flushcontext {sessfile}".format(**cmdargs))
retout = retDict['retout']
code = retDict['code']
fileout = retDict['fileouts'][secpath]
logger.info("AIK activated.")
key = base64.b64encode(fileout)
os.close(secfd)
os.remove(secpath)
except Exception as e:
logger.error("Error decrypting AIK: "+str(e))
logger.exception(e)
return False
finally:
if keyblobFile is not None:
os.remove(keyblobFile.name)
if secpath is not None and os.path.exists(secpath):
os.remove(secpath)
if sesspath is not None and os.path.exists(sesspath):
os.remove(sesspath)
return key
def verify_ek(self, ekcert, ekpem):
"""Verify that the provided EK certificate is signed by a trusted root
:param ekcert: The Endorsement Key certificate in DER format
:param ekpem: the endorsement public key in PEM format
:returns: True if the certificate can be verified, false otherwise
"""
#openssl x509 -inform der -in certificate.cer -out certificate.pem
try:
ek509 = M2Crypto.X509.load_cert_der_string(ekcert)
ekcertpem = ek509.get_pubkey().get_rsa().as_pem(cipher=None).decode('utf-8')
# Make sure given ekcert is for their ek
if str(ekpem) != str(ekcertpem):
logger.error("Public EK does not match EK certificate")
return False
trusted_certs = tpm_ek_ca.cert_loader()
for cert in trusted_certs:
signcert = M2Crypto.X509.load_cert_string(cert)
signkey = signcert.get_pubkey()
if ek509.verify(signkey) == 1:
logger.debug(f"EK cert matched cert: {cert}")
return True
except Exception as e:
# Log the exception so we don't lose the raw message
logger.exception(e)
raise Exception("Error processing ek/ekcert. Does this TPM have a valid EK?").with_traceback(sys.exc_info()[2])
logger.error(f"No Root CA matched EK Certificate")
return False
def get_tpm_manufacturer(self):
vendorStr = None
if tools_version == "3.2":
retDict = self.__run("tpm2_getcap -c properties-fixed")
elif tools_version == "4.0":
retDict = self.__run("tpm2_getcap properties-fixed")
output = retDict['retout']
reterr = retDict['reterr']
code = retDict['code']
if code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
raise Exception("get_tpm_manufacturer failed with code "+str(code)+": "+str(reterr))
# Clean up TPM manufacturer information (strip control characters)
# These strings are supposed to be printable ASCII characters, but
# some TPM manufacturers put control characters in here
for i, s in enumerate(output):
output[i] = re.sub(r"[\x01-\x1F\x7F]", "", s.decode('utf-8')).encode('utf-8')
retyaml = common.yaml_to_dict(output)
if "TPM2_PT_VENDOR_STRING_1" in retyaml:
vendorStr = retyaml["TPM2_PT_VENDOR_STRING_1"]["value"]
elif "TPM_PT_VENDOR_STRING_1" in retyaml:
vendorStr = retyaml["TPM_PT_VENDOR_STRING_1"]["as string"].strip()
return vendorStr
def is_emulator(self):
return self.get_tpm_manufacturer() == 'SW'
def is_vtpm(self):
return False
def tpm_init(self, self_activate=False, config_pw=None):
# this was called tpm_initialize.init before
self.warn_emulator()
self.__take_ownership(config_pw)
self.__create_ek()
self.__get_pub_ek()
ekcert = self.read_ekcert_nvram()
self._set_tpm_metadata('ekcert', ekcert)
# if no AIK created, then create one
self.__create_aik(self_activate)
return self.get_tpm_metadata('ek'), self.get_tpm_metadata('ekcert'), self.get_tpm_metadata('aik'), self.get_tpm_metadata('ek_tpm'), self.get_tpm_metadata('aik_name')
#tpm_quote
def __pcr_mask_to_list(self, mask, hash_alg):
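        # Converts a PCR bitmask into the selection-list string passed to tpm2_quote,
        # keeping IMA's PCR in SHA1. Illustrative example (assuming common.IMA_PCR == 10):
        # mask "0x600" with sha256 -> "9+sha1:10".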
pcr_list = []
ima_appended = ""
for pcr in range(24):
if tpm_abstract.TPM_Utilities.check_mask(mask, pcr):
if hash_alg != tpm_abstract.Hash_Algorithms.SHA1 and pcr == common.IMA_PCR:
# IMA is only in SHA1 format
ima_appended = "+sha1:"+str(pcr)
else:
pcr_list.append(str(pcr))
return ",".join(pcr_list)+ima_appended
def create_deep_quote(self, nonce, data=None, vpcrmask=tpm_abstract.AbstractTPM.EMPTYMASK, pcrmask=tpm_abstract.AbstractTPM.EMPTYMASK):
raise Exception("vTPM support and deep quotes not yet implemented with TPM 2.0!")
def create_quote(self, nonce, data=None, pcrmask=tpm_abstract.AbstractTPM.EMPTYMASK, hash_alg=None):
if hash_alg is None:
hash_alg = self.defaults['hash']
quote = ""
with tempfile.NamedTemporaryFile() as quotepath:
with tempfile.NamedTemporaryFile() as sigpath:
with tempfile.NamedTemporaryFile() as pcrpath:
keyhandle = self.get_tpm_metadata('aik_handle')
aik_pw = self.get_tpm_metadata('aik_pw')
if pcrmask is None:
pcrmask = tpm_abstract.AbstractTPM.EMPTYMASK
# add PCR 16 to pcrmask
pcrmask = "0x%X"%(int(pcrmask,0) + (1 << common.TPM_DATA_PCR))
pcrlist = self.__pcr_mask_to_list(pcrmask, hash_alg)
with self.tpmutilLock:
if data is not None:
self.__run("tpm2_pcrreset %d"%common.TPM_DATA_PCR, lock=False)
self.extendPCR(pcrval=common.TPM_DATA_PCR, hashval=self.hashdigest(data), lock=False)
if tools_version == "3.2":
cmdargs = {
'aik_handle': hex(keyhandle),
'hashalg' : hash_alg,
'pcrlist': pcrlist,
'nonce': bytes(nonce, encoding="utf8").hex(),
'outquote': quotepath.name,
'outsig': sigpath.name,
'outpcr': pcrpath.name,
'akpw': aik_pw
}
command = "tpm2_quote -k {aik_handle} -L {hashalg}:{pcrlist} -q {nonce} -m {outquote} -s {outsig} -p {outpcr} -G {hashalg} -P {akpw}".format(**cmdargs)
else:
cmdargs = {
'aik_handle': keyhandle,
'hashalg' : hash_alg,
'pcrlist': pcrlist,
'nonce': bytes(nonce, encoding="utf8").hex(),
'outquote': quotepath.name,
'outsig': sigpath.name,
'outpcr': pcrpath.name,
'akpw': aik_pw
}
command = "tpm2_quote -c {aik_handle} -l {hashalg}:{pcrlist} -q {nonce} -m {outquote} -s {outsig} -o {outpcr} -g {hashalg} -p {akpw}".format(**cmdargs)
retDict = self.__run(command, lock=False, outputpaths=[quotepath.name, sigpath.name, pcrpath.name])
retout = retDict['retout']
code = retDict['code']
quoteraw = retDict['fileouts'][quotepath.name]
quote_b64encode = base64.b64encode(zlib.compress(quoteraw))
sigraw = retDict['fileouts'][sigpath.name]
sigraw_b64encode = base64.b64encode(zlib.compress(sigraw))
pcrraw = retDict['fileouts'][pcrpath.name]
pcrraw_b64encode = base64.b64encode(zlib.compress(pcrraw))
quote = quote_b64encode.decode('utf-8')+":"+sigraw_b64encode.decode('utf-8')+":"+pcrraw_b64encode.decode('utf-8')
return 'r'+quote
def __checkdeepquote_c(self, hAIK, vAIK, deepquoteFile, nonce):
raise Exception("vTPM support and deep quotes not yet implemented with TPM 2.0!")
def check_deep_quote(self, agent_id, nonce, data, quote, vAIK, hAIK, vtpm_policy={}, tpm_policy={}, ima_measurement_list=None, ima_whitelist={}):
raise Exception("vTPM support and deep quotes not yet implemented with TPM 2.0!")
def __check_quote_c(self, pubaik, nonce, quoteFile, sigFile, pcrFile, hash_alg):
if common.STUB_TPM and common.TPM_CANNED_VALUES is not None:
jsonIn = common.TPM_CANNED_VALUES
if 'tpm2_deluxequote' in jsonIn and 'nonce' in jsonIn['tpm2_deluxequote']:
# YAML unicode-ifies strings, and C calls require byte strings (str)
nonce = str(jsonIn['tpm2_deluxequote']['nonce'])
else:
raise Exception("Could not get quote nonce from canned JSON!")
cmdargs = {
'pubak': pubaik,
'quotefile' : quoteFile,
'sigfile': sigFile,
'pcrfile': pcrFile,
'hashalg': hash_alg,
'nonce': bytes(nonce, encoding="utf8").hex()
}
if tools_version == "3.2":
command = "tpm2_checkquote -c {pubak} -m {quotefile} -s {sigfile} -p {pcrfile} -G {hashalg} -q {nonce}"
else:
command = "tpm2_checkquote -u {pubak} -m {quotefile} -s {sigfile} -f {pcrfile} -g {hashalg} -q {nonce}"
retDict = self.__run(command.format(**cmdargs), lock=False)
return retDict
def check_quote(self, agent_id, nonce, data, quote, aikFromRegistrar, tpm_policy={}, ima_measurement_list=None, ima_whitelist={}, hash_alg=None):
if hash_alg is None:
hash_alg = self.defaults['hash']
quoteFile = None
aikFile = None
sigFile = None
pcrFile = None
if quote[0] != 'r':
raise Exception("Invalid quote type %s"%quote[0])
quote = quote[1:]
quote_tokens = quote.split(":")
if len(quote_tokens) < 3:
raise Exception("Quote is not compound! %s"%quote)
quoteblob = zlib.decompress(base64.b64decode(quote_tokens[0]))
sigblob = zlib.decompress(base64.b64decode(quote_tokens[1]))
pcrblob = zlib.decompress(base64.b64decode(quote_tokens[2]))
try:
# write out quote
qfd, qtemp = tempfile.mkstemp()
quoteFile = open(qtemp, "wb")
quoteFile.write(quoteblob)
quoteFile.close()
os.close(qfd)
# write out sig
sfd, stemp = tempfile.mkstemp()
sigFile = open(stemp, "wb")
sigFile.write(sigblob)
sigFile.close()
os.close(sfd)
# write out pcr
pfd, ptemp = tempfile.mkstemp()
pcrFile = open(ptemp, "wb")
pcrFile.write(pcrblob)
pcrFile.close()
os.close(pfd)
afd, atemp = tempfile.mkstemp()
aikFile = open(atemp, "wb")
aikFile.write(aikFromRegistrar.encode('utf-8'))
aikFile.close()
os.close(afd)
retDict = self.__check_quote_c(aikFile.name, nonce, quoteFile.name, sigFile.name, pcrFile.name, hash_alg)
retout = retDict['retout']
reterr = retDict['reterr']
code = retDict['code']
except Exception as e:
logger.error("Error verifying quote: "+str(e))
logger.exception(e)
return False
finally:
if aikFile is not None:
os.remove(aikFile.name)
if quoteFile is not None:
os.remove(quoteFile.name)
if sigFile is not None:
os.remove(sigFile.name)
if pcrFile is not None:
os.remove(pcrFile.name)
if len(retout) < 1 or code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
logger.error("Failed to validate signature, output: %s"%reterr)
return False
pcrs = []
jsonout = common.yaml_to_dict(retout)
if "pcrs" in jsonout:
if hash_alg in jsonout["pcrs"]:
alg_size = tpm_abstract.Hash_Algorithms.get_hash_size(hash_alg) // 4
for pcrval, hashval in jsonout["pcrs"][hash_alg].items():
pcrs.append("PCR " + str(pcrval) + " " + '{0:0{1}x}'.format(hashval, alg_size))
# IMA is always in SHA1 format, so don't leave it behind!
if hash_alg != tpm_abstract.Hash_Algorithms.SHA1:
if tpm_abstract.Hash_Algorithms.SHA1 in jsonout["pcrs"] and common.IMA_PCR in jsonout["pcrs"][tpm_abstract.Hash_Algorithms.SHA1]:
sha1_size = tpm_abstract.Hash_Algorithms.get_hash_size(tpm_abstract.Hash_Algorithms.SHA1) // 4
ima_val = jsonout["pcrs"][tpm_abstract.Hash_Algorithms.SHA1][common.IMA_PCR]
pcrs.append("PCR " + str(common.IMA_PCR) + " " + '{0:0{1}x}'.format(ima_val, sha1_size))
if len(pcrs) == 0:
pcrs = None
return self.check_pcrs(agent_id, tpm_policy, pcrs, data, False, ima_measurement_list, ima_whitelist)
def sim_extend(self,hashval_1,hashval_0=None):
# simulate extending a PCR value by performing TPM-specific extend procedure
if hashval_0 is None:
hashval_0 = self.START_HASH()
# compute expected value H(0|H(data))
extendedval = self.hashdigest(codecs.decode(hashval_0,'hex_codec')+codecs.decode(self.hashdigest(hashval_1.encode('utf-8')),'hex_codec')).lower()
return extendedval
def extendPCR(self, pcrval, hashval, hash_alg=None, lock=True):
if hash_alg is None:
hash_alg = self.defaults['hash']
self.__run("tpm2_pcrextend %d:%s=%s"%(pcrval, hash_alg, hashval), lock=lock)
def readPCR(self, pcrval, hash_alg=None):
if hash_alg is None:
hash_alg = self.defaults['hash']
if tools_version == "3.2":
output = common.list_convert(self.__run("tpm2_pcrlist")['retout'])
elif tools_version == "4.0":
output = common.list_convert(self.__run("tpm2_pcrread")['retout'])
jsonout = common.yaml_to_dict(output)
if hash_alg not in jsonout:
raise Exception("Invalid hashing algorithm '%s' for reading PCR number %d."%(hash_alg, pcrval))
# alg_size = Hash_Algorithms.get_hash_size(hash_alg)/4
alg_size = tpm_abstract.Hash_Algorithms.get_hash_size(hash_alg) // 4
return '{0:0{1}x}'.format(jsonout[hash_alg][pcrval], alg_size)
#tpm_random
def _get_tpm_rand_block(self, size=32):
#make a temp file for the output
rand = None
with tempfile.NamedTemporaryFile() as randpath:
try:
command = "tpm2_getrandom -o %s %d" % (randpath.name, size)
retDict = self.__run(command, outputpaths=randpath.name)
retout = retDict['retout']
code = retDict['code']
rand = retDict['fileouts'][randpath.name]
except Exception as e:
if not self.tpmrand_warned:
logger.warn("TPM randomness not available: %s"%e)
self.tpmrand_warned = True
return None
return rand
#tpm_nvram
def write_key_nvram(self, key):
owner_pw = self.get_tpm_metadata('owner_pw')
# write out quote
with tempfile.NamedTemporaryFile() as keyFile:
keyFile.write(key)
keyFile.flush()
attrs = "ownerread|ownerwrite"
if tools_version == "3.2":
self.__run("tpm2_nvdefine -x 0x1500018 -a 0x40000001 -s %s -t \"%s\" -I %s -P %s"%(common.BOOTSTRAP_KEY_SIZE, attrs, owner_pw, owner_pw), raiseOnError=False)
self.__run("tpm2_nvwrite -x 0x1500018 -a 0x40000001 -P %s %s"%(owner_pw, keyFile.name), raiseOnError=False)
else:
self.__run("tpm2_nvdefine 0x1500018 -C 0x40000001 -s %s -a \"%s\" -p %s -P %s"%(common.BOOTSTRAP_KEY_SIZE, attrs, owner_pw, owner_pw), raiseOnError=False)
self.__run("tpm2_nvwrite 0x1500018 -C 0x40000001 -P %s -i %s"%(owner_pw, keyFile.name), raiseOnError=False)
return
def read_ekcert_nvram(self):
#make a temp file for the quote
with tempfile.NamedTemporaryFile() as nvpath:
# Check for RSA EK cert in NVRAM (and get length)
if tools_version == "3.2":
retDict = self.__run("tpm2_nvlist", raiseOnError=False)
elif tools_version == "4.0":
retDict = self.__run("tpm2_nvreadpublic", raiseOnError=False)
output = retDict['retout']
reterr = retDict['reterr']
code = retDict['code']
if code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
if tools_version == "3.2":
raise Exception("tpm2_nvlist for ekcert failed with code "+str(code)+": "+str(reterr))
elif tools_version == "4.0":
raise Exception("tpm2_nvreadpublic for ekcert failed with code "+str(code)+": "+str(reterr))
outjson = common.yaml_to_dict(output)
if outjson is None or 0x1c00002 not in outjson or "size" not in outjson[0x1c00002]:
logger.warn("No EK certificate found in TPM NVRAM")
return None
ekcert_size = outjson[0x1c00002]["size"]
# Read the RSA EK cert from NVRAM (DER format)
if tools_version == "3.2":
retDict = self.__run("tpm2_nvread -x 0x1c00002 -s %s -f %s -a 0x01c00002"%(ekcert_size, nvpath.name), raiseOnError=False, outputpaths=nvpath.name)
elif tools_version == "4.0":
retDict = self.__run("tpm2_nvread 0x1c00002 -s %s -o %s"%(ekcert_size, nvpath.name), raiseOnError=False, outputpaths=nvpath.name)
output = common.list_convert(retDict['retout'])
errout = common.list_convert(retDict['reterr'])
code = retDict['code']
ekcert = retDict['fileouts'][nvpath.name]
if code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
raise Exception("tpm2_nvread for ekcert failed with code "+str(code)+": "+str(errout))
return base64.b64encode(ekcert)
def read_key_nvram(self):
owner_pw = self.get_tpm_metadata('owner_pw')
if tools_version == "3.2":
retDict = self.__run("tpm2_nvread -x 0x1500018 -a 0x40000001 -s %s -P %s"%(common.BOOTSTRAP_KEY_SIZE, owner_pw), raiseOnError=False)
else:
retDict = self.__run("tpm2_nvread 0x1500018 -C 0x40000001 -s %s -P %s"%(common.BOOTSTRAP_KEY_SIZE, owner_pw), raiseOnError=False)
output = retDict['retout']
errout = common.list_convert(retDict['reterr'])
code = retDict['code']
if code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
if len(errout) > 0 and "handle does not exist" in "\n".join(errout):
logger.debug("No stored U in TPM NVRAM")
return None
elif len(errout) > 0 and "ERROR: Failed to read NVRAM public area at index" in "\n".join(errout):
logger.debug("No stored U in TPM NVRAM")
return None
else:
raise Exception("nv_readvalue failed with code "+str(code)+": "+str(errout))
if len(output) != common.BOOTSTRAP_KEY_SIZE:
logger.debug("Invalid key length from NVRAM: %d"%(len(output)))
return None
return output
|
py | b40e58ebad2c256b5b18f1b0c9746ae6d7fb0464 | # -*- coding: utf-8 -*-
"""
A dataset containing 50k reactions of 10 types from USPTO data. It is a standard benchmark in retrosynthesis papers.
"""
import logging
import os
import pandas as pd
from tqdm import tqdm
from src import DATA_DIR
from src.datasets import Dataset
from src.utils import complete_mappings
logger = logging.getLogger(__name__)
REACTION_TYPES = {
1: 'heteroatom alkylation and arylation',
2: 'acylation and related processes',
3: 'C-C bond formation',
4: 'heterocycle formation',
5: 'protections',
6: 'deprotections',
7: 'reductions',
8: 'oxidations',
9: 'functional group interconversion (FGI)',
10: 'functional group addition (FGA)'
}
class Uspto50k(Dataset):
def __init__(self):
super(Uspto50k, self).__init__()
self.raw_data_path = os.path.join(self.feat_dir, 'data_processed.csv')
@property
def meta_info(self) -> dict:
return {'reaction_types': REACTION_TYPES, 'max_n_nodes': 100}
@property
def key(self) -> str:
return 'uspto_50k'
def acquire(self):
x = {
'product': [],
'substrates': []
}
split = {
'train': [],
'valid': [],
'test': []
}
meta = {
'reaction_type_id': [],
'id': []
}
curr_id = 0 # minhtoo add
for split_key, filename in (('train', 'raw_train.csv'), ('valid', 'raw_val.csv'), ('test', 'raw_test.csv')):
data_path = os.path.join(DATA_DIR, f'uspto_50k/{filename}')
if not os.path.exists(data_path):
raise FileNotFoundError(
f'File not found at: {data_path}. Please download data manually from '
'https://www.dropbox.com/sh/6ideflxcakrak10/AAAESdZq7Y0aNGWQmqCEMlcza/typed_schneider50k '
'and extract to the required location.')
data_df = pd.read_csv(data_path)
ids = [] # minhtoo add
# data_df['reactants>reagents>production']
for reaction_smiles in tqdm(data_df['rxn_smiles'], total=len(data_df),
desc="generating product/substrates SMILES'"):
subs, prod = tuple(reaction_smiles.split('>>'))
subs, prod = complete_mappings(subs, prod)
x['substrates'].append(subs)
x['product'].append(prod)
curr_id += 1
ids.append(curr_id)
for split_key2 in ['train', 'valid', 'test']:
if split_key == split_key2:
split[split_key2] += [1 for _ in range(len(data_df))]
else:
split[split_key2] += [0 for _ in range(len(data_df))]
# meta['reaction_type_id'] += data_df['class'].tolist()
meta['reaction_type_id'] += [0] * len(ids) # minhtoo add
meta['id'] += ids # minhtoo add
# meta['id'] += data_df['id'].tolist()
logger.info(f"Saving 'x' to {self.x_path}")
pd.DataFrame(x).to_csv(self.x_path, sep='\t')
logger.info(f"Saving {self.metadata_path}")
pd.DataFrame(meta).to_csv(self.metadata_path, sep='\t')
split_path = os.path.join(self.dir, 'default_split.csv')
logger.info(f"Saving default split to {split_path}")
pd.DataFrame(split).to_csv(split_path)
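# Illustrative usage sketch (assumes the raw_train/raw_val/raw_test CSVs described
# above are present under DATA_DIR/uspto_50k):
#   dataset = Uspto50k()
#   dataset.acquire()  # writes the x, metadata and default_split CSVs for this dataset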
|
py | b40e5990ff0945242ad4e30701545cef558654eb | """Empty init file to ensure documentation for multi-agent envs is created."""
from flow.envs.multiagent.base import MultiEnv
from flow.envs.multiagent.ring.wave_attenuation import \
MultiWaveAttenuationPOEnv
from flow.envs.multiagent.ring.accel import MultiAgentAccelEnv
from flow.envs.multiagent.traffic_light_grid import MultiTrafficLightGridPOEnv
from flow.envs.multiagent.highway import MultiAgentHighwayPOEnv
from flow.envs.multiagent.bayesian_0_no_grid_env import Bayesian0NoGridEnv
from flow.envs.multiagent.bayesian_1_inference_env import Bayesian1InferenceEnv
from flow.envs.multiagent.bayesian_l2_cooperative_env import BayesianL2CooperativeEnv, BayesianL2CooperativeEnvWithQueryEnv
__all__ = ['MultiEnv', 'MultiAgentAccelEnv', 'MultiWaveAttenuationPOEnv',
'MultiTrafficLightGridPOEnv', 'MultiAgentHighwayPOEnv',
'Bayesian0NoGridEnv', 'Bayesian1InferenceEnv', "BayesianL2CooperativeEnv",
"BayesianL2CooperativeEnvWithQueryEnv"]
|
py | b40e5a7d0f13eb6d6e0729153f40ddf5ef84476e | """Write Word
This program uses :py:mod:`docx` to write Word documents.
Note:
* Example files can be downloaded from http://nostarch.com/automatestuff/
"""
def main():
import docx
# Changing Run Attributes
doc = docx.Document("demo.docx")
print(doc.paragraphs[0].text)
print(doc.paragraphs[0].style)
doc.paragraphs[0].style = "Normal"
print(doc.paragraphs[1].text)
print((doc.paragraphs[1].runs[0].text, doc.paragraphs[1].runs[1].text,
doc.paragraphs[1].runs[2].text, doc.paragraphs[1].runs[3].text))
doc.paragraphs[1].runs[0].style = "Quote Char"
doc.paragraphs[1].runs[1].underline = True
doc.paragraphs[1].runs[3].underline = True
doc.save("restyled.docx")
# Writing Word Documents
doc = docx.Document()
print(doc.add_paragraph("Hello world!"))
doc.save("helloworld.docx")
doc = docx.Document()
print(doc.add_paragraph("Hello world!"))
paraObj1 = doc.add_paragraph("This is a second paragraph.")
paraObj2 = doc.add_paragraph("This is yet another paragraph.")
print(paraObj1.add_run(" This text is being added to the second paragraph."))
print(doc.add_paragraph("Hello world!", "Title"))
doc.save("multipleParagraphs.docx")
# Adding Headings
doc = docx.Document()
doc.add_heading("Header 0", 0)
doc.add_heading("Header 1", 1)
doc.add_heading("Header 2", 2)
doc.add_heading("Header 3", 3)
doc.add_heading("Header 4", 4)
doc.save("headings.docx")
# Adding Line and Page Breaks
doc = docx.Document()
doc.add_paragraph("This is on the first page!")
doc.paragraphs[0].runs[0].add_break(docx.enum.text.WD_BREAK.PAGE)
doc.add_paragraph("This is on the second page!")
doc.save("twoPage.docx")
# Adding Pictures
doc = docx.Document()
doc.add_picture("zophie.png", width=docx.shared.Inches(1),
height=docx.shared.Cm(4))
doc.save("picture.docx")
if __name__ == '__main__':
main()
|
py | b40e5ab5a2fc6916676e276525c68619b8b8b8b5 | import json
import os
import re
import time
import boto3
def execute_notebook(
*,
image,
input_path,
output_prefix,
notebook,
parameters,
role,
instance_type,
rule_name,
extra_args,
):
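    # Builds and submits a SageMaker Processing job that runs the notebook via the
    # run_notebook container entrypoint; image, role and output_prefix default to
    # account/region-derived values when not supplied.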
session = ensure_session()
region = session.region_name
account = session.client("sts").get_caller_identity()["Account"]
if not image:
image = "notebook-runner"
if "/" not in image:
image = f"{account}.dkr.ecr.{region}.amazonaws.com/{image}"
if ":" not in image:
image = image + ":latest"
if not role:
role = f"BasicExecuteNotebookRole-{region}"
if "/" not in role:
role = f"arn:aws:iam::{account}:role/{role}"
if output_prefix is None:
output_prefix = os.path.dirname(input_path)
if notebook == None:
notebook = input_path
base = os.path.basename(notebook)
nb_name, nb_ext = os.path.splitext(base)
timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
job_name = (
("papermill-" + re.sub(r"[^-a-zA-Z0-9]", "-", nb_name))[: 62 - len(timestamp)]
+ "-"
+ timestamp
)
input_directory = "/opt/ml/processing/input/"
local_input = input_directory + os.path.basename(input_path)
result = "{}-{}{}".format(nb_name, timestamp, nb_ext)
local_output = "/opt/ml/processing/output/"
api_args = {
"ProcessingInputs": [
{
"InputName": "notebook",
"S3Input": {
"S3Uri": input_path,
"LocalPath": input_directory,
"S3DataType": "S3Prefix",
"S3InputMode": "File",
"S3DataDistributionType": "FullyReplicated",
},
},
],
"ProcessingOutputConfig": {
"Outputs": [
{
"OutputName": "result",
"S3Output": {
"S3Uri": output_prefix,
"LocalPath": local_output,
"S3UploadMode": "EndOfJob",
},
},
],
},
"ProcessingJobName": job_name,
"ProcessingResources": {
"ClusterConfig": {
"InstanceCount": 1,
"InstanceType": instance_type,
"VolumeSizeInGB": 40,
}
},
"StoppingCondition": {"MaxRuntimeInSeconds": 7200},
"AppSpecification": {
"ImageUri": image,
"ContainerArguments": ["run_notebook",],
},
"RoleArn": role,
"Environment": {},
}
if extra_args is not None:
api_args = merge_extra(api_args, extra_args)
api_args["Environment"]["PAPERMILL_INPUT"] = local_input
api_args["Environment"]["PAPERMILL_OUTPUT"] = local_output + result
if os.environ.get("AWS_DEFAULT_REGION") != None:
api_args["Environment"]["AWS_DEFAULT_REGION"] = os.environ["AWS_DEFAULT_REGION"]
api_args["Environment"]["PAPERMILL_PARAMS"] = json.dumps(parameters)
api_args["Environment"]["PAPERMILL_NOTEBOOK_NAME"] = base
if rule_name is not None:
api_args["Environment"]["AWS_EVENTBRIDGE_RULE"] = rule_name
client = boto3.client("sagemaker")
result = client.create_processing_job(**api_args)
job_arn = result["ProcessingJobArn"]
job = re.sub("^.*/", "", job_arn)
return job
def merge_extra(orig, extra):
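    # Illustrative 'extra' sketch (keys mirror the CreateProcessingJob API; values
    # are made-up placeholders):
    #   {"Tags": [{"Key": "team", "Value": "research"}],
    #    "ProcessingResources": {"ClusterConfig": {"VolumeSizeInGB": 100}}}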
result = dict(orig)
result["ProcessingInputs"].extend(extra.get("ProcessingInputs", []))
result["ProcessingOutputConfig"]["Outputs"].extend(
extra.get("ProcessingOutputConfig", {}).get("Outputs", [])
)
if "KmsKeyId" in extra.get("ProcessingOutputConfig", {}):
result["ProcessingOutputConfig"]["KmsKeyId"] = extra["ProcessingOutputConfig"][
"KmsKeyId"
]
result["ProcessingResources"]["ClusterConfig"] = {
**result["ProcessingResources"]["ClusterConfig"],
**extra.get("ProcessingResources", {}).get("ClusterConfig", {}),
}
result = {
**result,
**{
k: v
for k, v in extra.items()
if k in ["ExperimentConfig", "NetworkConfig", "StoppingCondition", "Tags"]
},
}
return result
def ensure_session(session=None):
"""If session is None, create a default session and return it. Otherwise return the session passed in"""
if session is None:
session = boto3.session.Session()
return session
def lambda_handler(event, context):
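    # Illustrative event sketch (field names are the ones read below; bucket and
    # paths are made-up placeholders):
    #   {"input_path": "s3://example-bucket/notebooks/analysis.ipynb",
    #    "output_prefix": "s3://example-bucket/notebook-runs",
    #    "parameters": {"alpha": 0.1},
    #    "instance_type": "ml.m5.large"}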
job = execute_notebook(
image=event.get("image"),
input_path=event["input_path"],
output_prefix=event.get("output_prefix"),
notebook=event.get("notebook"),
parameters=event.get("parameters", dict()),
role=event.get("role"),
instance_type=event.get("instance_type", "ml.m5.large"),
rule_name=event.get("rule_name"),
extra_args=event.get("extra_args"),
)
return {"job_name": job}
|
py | b40e5bd757c28f11d50ff264cf62e2988b300dee | import unittest
import sys
sys.path.append(".")
from app.models import Article
class ArticleTest(unittest.TestCase):
'''
Test Class to test the behaviour of the News class
'''
def setUp(self):
'''
Set up method that will run before every Test
'''
        # Article(url, author, content, description, publishedAt, source, title, urlToImage)
self.new_news = Article(
url='https://image.tmdb.org/t/p/w500/khsjha27hbs',
author='Kingkrusha',
content='Python Must Be Crazy',
description='A thrilling new Python Series',
publishedAt=129993,
source='fox',
title='Python Must Be Crazy',
urlToImage='https://image.tmdb.org/t/p/w500/khsjha27hbs')
def test_instance(self):
self.assertTrue(isinstance(self.new_news,Article))
def test_author(self):
author_var = self.new_news.author
self.assertTrue(author_var == "Kingkrusha")
def test_content(self):
content_var = self.new_news.content
self.assertTrue(content_var == "Python Must Be Crazy")
def test_description(self):
description_var = self.new_news.description
self.assertTrue(description_var == "A thrilling new Python Series")
def test_publishedAt(self):
publishedAt_int = self.new_news.publishedAt
self.assertTrue(publishedAt_int == 129993)
def test_url(self):
url_var = self.new_news.url
self.assertTrue(url_var == "https://image.tmdb.org/t/p/w500/khsjha27hbs")
def test_title(self):
title_var = self.new_news.title
self.assertTrue(title_var == "Python Must Be Crazy")
def test_source(self):
source_var = self.new_news.source
self.assertTrue(source_var == "fox")
def test_urlToImage(self):
urlToImage_var = self.new_news.urlToImage
self.assertTrue(urlToImage_var == "https://image.tmdb.org/t/p/w500/khsjha27hbs")
if __name__ == '__main__':
unittest.main() |
py | b40e5d71ef6f4988ed036fa4f816bfafd3d5a483 | """
Test file for everything IPC socket related
"""
import pytest
from osbrain import run_agent
from osbrain import run_nameserver
from osbrain.helper import agent_dies
from osbrain.helper import wait_condition
from common import nsproxy # noqa: F401
from common import skip_windows_ipc
pytestmark = skip_windows_ipc
def test_agent_close_ipc_socket_agent_shutdown(nsproxy):
"""
Check that the socket is closed and the socket file removed after the agent
is shut down.
"""
agent = run_agent('name')
address = agent.bind('PUSH')
agent.shutdown()
assert agent_dies('name', nsproxy)
assert wait_condition(address.address.exists, negate=True)
def test_agent_close_ipc_socket_agent_kill(nsproxy):
"""
Check that the socket is closed and the socket file removed after the agent
is killed.
"""
agent = run_agent('name')
address = agent.bind('PUSH')
agent.oneway.kill()
assert agent_dies('name', nsproxy)
assert wait_condition(address.address.exists, negate=True)
def test_agent_close_ipc_socket_agent_blocked_nameserver_shutdown():
"""
Check that the socket is closed and the socket file removed when the name
server is shut down having a blocked agent.
"""
def block(agent):
agent.send('out', 'blocking...')
ns = run_nameserver()
blocker = run_agent('blocker')
blocker.set_method(block)
addr = blocker.bind('PUSH', alias='out')
blocker.after(0, 'block')
ns.shutdown(timeout=1.)
assert wait_condition(addr.address.exists, negate=True)
def test_agent_close_ipc_socket_agent_crash_nameserver_shutdown():
"""
Check that the socket is closed and the socket file removed after the agent
crashes and the name server calls for shutdown.
"""
ns = run_nameserver()
agent = run_agent('agent')
addr = agent.bind('PUSH', 'main')
with pytest.raises(RuntimeError):
agent.raise_exception()
ns.shutdown()
assert wait_condition(addr.address.exists, negate=True)
def test_agent_close_ipc_socket_nameserver_shutdown():
"""
Check that the socket is closed and the socket file removed after the name
server is shut down.
"""
ns = run_nameserver()
agent = run_agent('agent')
addr = agent.bind('PUSH', 'main')
ns.shutdown()
assert wait_condition(addr.address.exists, negate=True)
|
py | b40e5e1791e27297951893a5de4f5320368b9abf | # Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Jax recurrent layer implementation.
The main interface of this module is recurrent_func().
This expects the caller to describe the recurrent neural net by specifying:
- theta: the "weights" each RNN uses.
- states_0: the initial state of each RNN.
- cell_fn: A python function describing RNN cell. It must have the following
signature::
cell_fn: (theta, states_0, inputs) -> states_1
states_1 is the next RNN state.
recurrent_func computes, roughly::
state = states_0
t = 0
while t < seq_length:
state = cell_fn(theta, state, inputs[t, :])
accumulate_state[t, :] = state
t += 1
return accumulate_state, state
"""
import enum
import functools
from typing import Callable, Optional, Tuple
import jax
from jax import ad_checkpoint
from jax import numpy as jnp
from lingvo.jax import base_layer
from lingvo.jax import py_utils
from lingvo.jax import pytypes
import tensorflow.compat.v2 as tf
NestedMap = py_utils.NestedMap
WeightInit = py_utils.WeightInit
WeightParams = py_utils.WeightParams
ParamsT = pytypes.ParamsT
JTensor = pytypes.JTensor
CallableOrNone = Optional[Callable]
NestedMapOrNone = Optional[NestedMap]
@enum.unique
class AutodiffCheckpointType(str, enum.Enum):
"""jax.checkpoint policy types."""
SAVE_EVERYTHING = 'save_everything'
SAVE_NOTHING = 'save_nothing'
SAVE_DOT_ONLY = 'save_dot_only'
SAVE_DOT_WITH_NO_BATCH_DIM = 'save_dot_with_no_batch_dims'
SAVE_DOT_FOR_MLPERF_200B = 'save_dot_for_mlperf_200b'
def recurrent_func(theta: NestedMap, states_0: NestedMap, inputs: NestedMap,
cell_fn: Callable[[NestedMap, NestedMap, NestedMap],
NestedMap]):
"""Computes a recurrent neural net.
Args:
theta: weights. A `.NestedMap`.
states_0: initial state. A `.NestedMap`.
inputs: inputs. A `.NestedMap`.
cell_fn: A python function which computes::
states_1 = cell_fn(theta, states_0, inputs[t, :])
Returns:
`accumulate_state` and the final state.
"""
input_seq_len = inputs.Flatten()[0].shape[0]
def assert_not_none(x):
assert x is not None
tf.nest.map_structure(assert_not_none, states_0)
tf.nest.map_structure(assert_not_none, inputs)
tf.nest.map_structure(assert_not_none, theta)
def new_cum_state(x):
x1 = jnp.expand_dims(x, 0)
# +1 so that we can store initial_states at position 0.
return jnp.tile(x1, [input_seq_len + 1] + [1] * x.ndim)
cumulative_states = states_0.Transform(new_cum_state)
prng_key = base_layer.NextPrngKey()
global_step = base_layer.CurGlobalStep()
start_time = jnp.array(0, dtype=jnp.uint32)
fwd_initial_loop_vars = NestedMap(
cur_time=start_time,
theta=theta,
states_0=states_0,
cumulative_states=cumulative_states,
inputs=inputs)
def same_type_shape(x, y):
assert x.dtype == y.dtype, (x.dtype, y.dtype)
assert x.shape == y.shape, (x.shape, y.shape)
def wrapped_cell_fn(fn_in):
# fn_in is NestedMap containing the following elements:
# - t
# - theta
# - states_0
# - inputs_t
    # Start a chain of prng keys that also takes the time step into account.
t = fn_in.t
theta = fn_in.theta
states_0 = fn_in.states_0
inputs_t = fn_in.inputs_t
with base_layer.JaxContext.NewContext(
prng_key=jax.random.fold_in(prng_key, t), global_step=global_step):
states_1 = cell_fn(theta, states_0, inputs_t)
tf.nest.assert_same_structure(states_0, states_1)
tf.nest.map_structure(same_type_shape, states_0, states_1)
return states_1
def wrapped_cell_fn_grad(fn_in, d_fn_out):
# This is roughly the following:
#
# fn_out = wrapped_cell_fn(fn_in)
# d_fn_in = tf.gradient(fn_out, fn_in, d_fn_out)
# return d_fn_in
#
assert isinstance(fn_in, NestedMap)
fn_out, vjp_fn = jax.vjp(wrapped_cell_fn, fn_in)
del fn_out
d_fn_in = vjp_fn(d_fn_out)
assert isinstance(d_fn_in, tuple)
assert len(d_fn_in) == 1
d_fn_in_0 = d_fn_in[0]
# Over-write gradient for t, the time step.
d_fn_in_0.t = jnp.zeros_like(fn_in.t)
tf.nest.assert_same_structure(fn_in, d_fn_in_0)
tf.nest.map_structure(same_type_shape, fn_in, d_fn_in_0)
return d_fn_in_0
def fwd_comp_fn(loop_vars):
# loop_vars is a NestedMap containing the following elements:
# - cur_time
# - theta
# - inputs
# - cumulative_states
# - states_0
t = loop_vars.cur_time
theta = loop_vars.theta
inputs = loop_vars.inputs
cumulative_states = loop_vars.cumulative_states
states_0 = loop_vars.states_0
inputs_t = inputs.Transform(lambda x: x[t])
states_1 = wrapped_cell_fn(
NestedMap(t=t, theta=theta, states_0=states_0, inputs_t=inputs_t))
def set_t(x, x_t):
return x.at[t + 1].set(x_t)
cumulative_states = tf.nest.map_structure(set_t, cumulative_states,
states_1)
loop_out = NestedMap(
cur_time=t + 1,
theta=theta,
inputs=inputs,
states_0=states_1,
cumulative_states=cumulative_states)
return loop_out
def fwd_continue_fn(loop_vars):
return loop_vars.cur_time < input_seq_len
# This custom_vjp implementation follows examples here:
# https://jax.readthedocs.io/en/latest/notebooks/Custom_derivative_rules_for_Python_code.html
@jax.custom_vjp
def fwd_loop(loop_vars):
final_loop_vars = jax.lax.while_loop(fwd_continue_fn, fwd_comp_fn,
loop_vars)
return NestedMap(
final_states=final_loop_vars.states_0,
cumulative_states=final_loop_vars.cumulative_states)
def loop_fn_vjp_fwd(loop_vars):
loop_fn_out = fwd_loop(loop_vars)
return loop_fn_out, (loop_vars, loop_fn_out.cumulative_states)
def loop_fn_vjp_bwd(res, d_out):
fwd_loop_vars, cumulative_states = res
d_final_states = d_out.final_states
d_cumulative_states = d_out.cumulative_states
start_time = input_seq_len - 1
d_states_1 = tf.nest.map_structure(lambda x, y: x[start_time + 1] + y,
d_cumulative_states, d_final_states)
bwd_loop_vars = NestedMap(
cur_time=start_time,
theta=fwd_loop_vars.theta,
inputs=fwd_loop_vars.inputs,
cumulative_states=cumulative_states,
d_cumulative_states=d_cumulative_states,
d_theta=fwd_loop_vars.theta.Transform(jnp.zeros_like),
d_inputs=fwd_loop_vars.inputs.Transform(jnp.zeros_like),
d_states_1=d_states_1)
def bwd_comp_fn(loop_vars):
t = loop_vars.cur_time
inputs = loop_vars.inputs
inputs_t = inputs.Transform(lambda x: x[t])
states_0 = loop_vars.cumulative_states.Transform(lambda x: x[t])
d_cell_in = wrapped_cell_fn_grad(
NestedMap(
t=t, theta=loop_vars.theta, states_0=states_0, inputs_t=inputs_t),
loop_vars.d_states_1)
d_theta = tf.nest.map_structure(lambda x, y: x + y, loop_vars.d_theta,
d_cell_in.theta)
d_states_0 = tf.nest.map_structure(lambda x, y: x + y[t],
d_cell_in.states_0,
loop_vars.d_cumulative_states)
def set_t(x, x_t):
return x.at[t].set(x_t)
d_inputs = tf.nest.map_structure(set_t, loop_vars.d_inputs,
d_cell_in.inputs_t)
loop_vars_out = loop_vars.Transform(lambda x: x)
loop_vars_out.d_inputs = d_inputs
loop_vars_out.d_states_1 = d_states_0
loop_vars_out.d_theta = d_theta
loop_vars_out.cur_time = t - 1
return loop_vars_out
def bwd_continue_fn(loop_vars):
return loop_vars.cur_time >= 0
bwd_final_loop_vars = jax.lax.while_loop(bwd_continue_fn, bwd_comp_fn,
bwd_loop_vars)
d_out = fwd_loop_vars.Transform(jnp.zeros_like)
tf.nest.map_structure(same_type_shape, d_out.states_0,
bwd_final_loop_vars.d_states_1)
tf.nest.map_structure(same_type_shape, d_out.theta,
bwd_final_loop_vars.d_theta)
tf.nest.map_structure(same_type_shape, d_out.inputs,
bwd_final_loop_vars.d_inputs)
d_out.states_0 = bwd_final_loop_vars.d_states_1
d_out.theta = bwd_final_loop_vars.d_theta
d_out.inputs = bwd_final_loop_vars.d_inputs
return (d_out,)
fwd_loop.defvjp(loop_fn_vjp_fwd, loop_fn_vjp_bwd)
# Finally, let's simply run the forward loop fn.
fwd_final_loop_vars = fwd_loop(fwd_initial_loop_vars)
fwd_cumulative_states = fwd_final_loop_vars.cumulative_states.Transform(
lambda x: x[1:])
return fwd_final_loop_vars.final_states, fwd_cumulative_states
def recurrent_static(theta: NestedMap,
states_0: NestedMap,
inputs: NestedMap,
cell_fn: Callable[[NestedMap, NestedMap, NestedMap],
NestedMap],
root_layer: Optional[base_layer.BaseLayer] = None):
"""A simpler form of Recurrent where num of steps is known statically.
Back-prop is availale through auto-diff.
'padding' in inputs is used to skip certain steps dynamically. If the
  'padding' tensor exists, it is expected to be a binary 0/1 tensor.
Args:
theta: weights. A `.NestedMap`.
states_0: initial state. A `.NestedMap`.
inputs: inputs. A `.NestedMap`. All inputs in time-major.
cell_fn: A python function which computes::
states_1 = cell_fn(theta, states_0, inputs[t, :])
root_layer: The root layer within which this recurrent_static recurrent loop
is carried out.
Returns:
`accumulate_state` and the final state.
"""
assert 'time_step' not in states_0
# The initial time step.
time_step = jnp.array(0, dtype=jnp.uint32)
# Make a copy of states_0 structure.
states_0 = tf.nest.map_structure(lambda x: x, states_0)
states_0.time_step = time_step
prng_key = base_layer.NextPrngKey()
global_step = base_layer.CurGlobalStep()
# TODO(zhangqiaorjc): Switch to ad_checkpoint.checkpoint after mattjj bug fix.
@jax.checkpoint
def comp_fn(states_0, inputs_t):
# Start a new prng_key branch that also depends on the time step.
if root_layer is not None:
forward_updated_vars_before = tf.nest.map_structure(
lambda x: x, root_layer.forward_updated_vars)
prng_key_t = jax.random.fold_in(prng_key, states_0.time_step)
with base_layer.JaxContext.NewContext(
prng_key=prng_key_t, global_step=global_step):
# Whether or not we should skip this time step.
if 'padding' in inputs_t:
# We skip if all are padded steps.
skip = jnp.all(inputs_t.padding > 0.5)
else:
skip = jnp.array(False)
def carry_over(args):
states_0, inputs_t = args
del inputs_t
# We simply carry over the states for this time step.
states_1 = tf.nest.map_structure(lambda x: x, states_0)
states_1.time_step = states_0.time_step + 1
return states_1
def do_compute(args):
states_0, inputs_t = args
# Actually carry out the computation.
states_1 = cell_fn(theta, states_0, inputs_t)
states_1.time_step = states_0.time_step + 1
return states_1
if 'padding' in inputs_t:
states_1 = jax.lax.cond(skip, carry_over, do_compute,
(states_0, inputs_t))
else:
states_1 = do_compute((states_0, inputs_t))
tf.nest.assert_same_structure(states_0, states_1)
if root_layer is not None:
forward_updated_vars_after = tf.nest.map_structure(
lambda x: x, root_layer.forward_updated_vars)
def assert_no_change(x, y):
assert (x is None and y is None) or (x is not None and y is not None)
tf.nest.map_structure(assert_no_change, forward_updated_vars_before,
forward_updated_vars_after)
return states_1, states_1
final_states, cumulative_states = jax.lax.scan(comp_fn, states_0, inputs)
del final_states.time_step
del cumulative_states.time_step
return final_states, cumulative_states
def scan(carry_init: NestedMap,
xs: NestedMap,
fn: Callable[[NestedMap, NestedMap], Tuple[NestedMap, NestedMap]],
root_layer: Optional[base_layer.BaseLayer] = None,
checkpoint_policy: AutodiffCheckpointType = AutodiffCheckpointType
.SAVE_NOTHING):
"""A simple wrap around jax.lax.scan.
Back-prop is availale through auto-diff.
Args:
carry_init: initial state. A `.NestedMap`.
xs: inputs. A `.NestedMap`. All inputs in time-major.
fn: A python function which computes:
carry, ys[t] = fn(carry, xs[t, :])
    root_layer: The root layer within which this jax.lax.scan based loop is
      carried out. If root_layer is provided, a best-effort check is performed
      to make sure fn is side-effect free. Otherwise, no such checks are
      performed.
    checkpoint_policy: An AutodiffCheckpointType. How to checkpoint for BProp:
SAVE_NOTHING, SAVE_DOT_ONLY, SAVE_DOT_WITH_NO_BATCH_DIM.
Returns:
final 'carry' as well as 'ys'.
"""
assert isinstance(carry_init, py_utils.NestedMap)
assert isinstance(xs, py_utils.NestedMap)
# Make a copy of carry_init structure.
carry_init = tf.nest.map_structure(lambda x: x, carry_init)
# "carry" will be augmented with the following three tensors, so make sure
# they don't already exist in the NestedMap.
assert 'time_step' not in carry_init
assert 'prng_key' not in carry_init
assert 'global_step' not in carry_init
def custom_policy(checkpoint_policy: AutodiffCheckpointType):
if checkpoint_policy == AutodiffCheckpointType.SAVE_EVERYTHING:
return jax.checkpoint_policies.everything_saveable
if checkpoint_policy == AutodiffCheckpointType.SAVE_DOT_ONLY:
return jax.checkpoint_policies.checkpoint_dots
if checkpoint_policy == AutodiffCheckpointType.SAVE_DOT_WITH_NO_BATCH_DIM:
return jax.checkpoint_policies.checkpoint_dots_with_no_batch_dims
# TODO(zhangqiaorjc): Configure custom checkpoint policy in expt config
# without introducing enum.
if checkpoint_policy == AutodiffCheckpointType.SAVE_DOT_FOR_MLPERF_200B:
return jax.checkpoint_policies.save_only_these_names(
'combined_qkv_proj', 'query_proj', 'value_proj', 'key_proj',
'context', 'out_proj')
assert checkpoint_policy == AutodiffCheckpointType.SAVE_NOTHING
return jax.checkpoint_policies.nothing_saveable
@functools.partial(
ad_checkpoint.checkpoint,
prevent_cse=False,
policy=custom_policy(checkpoint_policy))
def fn_wrap(carry, xs_t):
    # carry is augmented with three additional tensors (time_step, prng_key,
    # global_step) to make fn_wrap fully functional.
if root_layer is not None:
forward_updated_vars_before = tf.nest.map_structure(
lambda x: x, root_layer.forward_updated_vars)
# Start a new prng_key branch that also depends on the time step.
prng_key_t = jax.random.fold_in(carry.prng_key, carry.time_step)
with base_layer.JaxContext.NewContext(
prng_key=prng_key_t, global_step=carry.global_step):
carry_new, ys_t = fn(carry, xs_t)
carry_new.time_step = carry.time_step + 1
# copy over prng_key and global_step
carry_new.prng_key = carry.prng_key
carry_new.global_step = carry.global_step
tf.nest.assert_same_structure(carry_new, carry)
if root_layer is not None:
forward_updated_vars_after = tf.nest.map_structure(
lambda x: x, root_layer.forward_updated_vars)
def assert_no_change(x, y):
assert (x is None and y is None) or (x is not None and y is not None)
      # Make sure fn doesn't have side effects; in particular, it doesn't
      # update any forward vars.
tf.nest.map_structure(assert_no_change, forward_updated_vars_before,
forward_updated_vars_after)
return carry_new, ys_t
# The initial time step.
time_step = jnp.array(0, dtype=jnp.uint32)
prng_key = base_layer.NextPrngKey()
global_step = base_layer.CurGlobalStep()
carry_init.time_step = time_step
carry_init.prng_key = prng_key
carry_init.global_step = global_step
carry_final, ys = jax.lax.scan(fn_wrap, carry_init, xs)
del carry_final.time_step
del carry_final.global_step
del carry_final.prng_key
return carry_final, ys
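# --- Illustrative usage sketch (editor addition, not part of the original file). ---
# A minimal, hypothetical example of driving scan() with a running-sum carry. It
# assumes the call happens inside a base_layer.JaxContext so that
# base_layer.NextPrngKey()/CurGlobalStep() used by scan() are available.
def _example_scan_usage():
  """Sketch: carry a running sum and emit the partial sums as ys."""
  def step_fn(carry, xs_t):
    new_carry = py_utils.NestedMap(total=carry.total + xs_t.x)
    ys_t = py_utils.NestedMap(partial_sum=new_carry.total)
    return new_carry, ys_t
  carry_init = py_utils.NestedMap(total=jnp.zeros([]))
  xs = py_utils.NestedMap(x=jnp.ones([8]))  # 8 time steps, time-major.
  carry_final, ys = scan(carry_init, xs, step_fn)
  return carry_final.total, ys.partial_sum  # 8.0 and partial sums [1., ..., 8.]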
|
py | b40e5e2c190a92cda19faafdd910d2171fbe835a | #!/usr/bin/env python
import Tkinter
import time
import threading
import random
import Queue
import numpy as np
import socket
import sys
import json
from PIL import Image, ImageTk
import traceback
import struct
CONFIG_FILE = './asi_client.conf'
# ASI config
ASI_ADDRESS = 'localhost'
ASI_PORT = 10001
ASI_X = 1280
ASI_Y = 960
ASI_IMG_SIZE = ASI_X * ASI_Y
# millisec
MIN_EXP_TIME = 1
MAX_EXP_TIME = 15000
DEFAULT_EXP_TIME = 20
SOCKET_TIMEOUT = 20
class GuiPart:
def __init__(self, master, queue, endCommand):
with open(CONFIG_FILE) as f:
self.config = json.load(f)
self.master = master
self.queue = queue
self.master.title("ASI client")
self.master.geometry("630x600")
self.master.protocol("WM_DELETE_WINDOW", endCommand)
self.frame_img = Tkinter.Frame(self.master)
self.frame_img.grid(row=0, column=0)
self.canvas = Tkinter.Canvas(self.frame_img, width=600, height=450)
self.canvas.grid(row=0, column=0, rowspan=2, sticky="N")
self.slider_exp = Tkinter.Scale(self.frame_img, from_=MIN_EXP_TIME, to=MAX_EXP_TIME, resolution=10, length=580, orient=Tkinter.HORIZONTAL, variable=exp_time, label='Exp time (ms)')
self.slider_exp.set(DEFAULT_EXP_TIME)
self.slider_exp.grid(row=1, column=0)
self.slider_gain = Tkinter.Scale(self.frame_img, from_=0, to=300, length=580, orient=Tkinter.HORIZONTAL, variable=gain, label='Gain')
self.slider_gain.set(150)
self.slider_gain.grid(row=2, column=0)
self.frame_ch = Tkinter.Frame(self.frame_img)
self.frame_ch.grid(row=3, column=0)
self.crosshair_x_label = Tkinter.Label(self.frame_ch, text='Crosshair: x')
self.crosshair_x_label.grid(row=0, column=0, sticky="W")
self.crosshair_x = Tkinter.Entry(self.frame_ch)
self.crosshair_x.insert(0, self.config['crosshair'][0])
self.crosshair_x.grid(row=0, column=1, sticky="W")
self.crosshair_y_label = Tkinter.Label(self.frame_ch, text='y')
self.crosshair_y_label.grid(row=0, column=2, sticky="W")
self.crosshair_y = Tkinter.Entry(self.frame_ch)
self.crosshair_y.insert(0, self.config['crosshair'][1])
self.crosshair_y.grid(row=0, column=3, sticky="W")
self.canvas_image = None
def processIncomingImage(self, msg):
data = msg['image']
if self.canvas_image is not None:
self.canvas.delete(self.canvas_image)
self.im = Image.frombytes('L', (data.shape[1],data.shape[0]), data.astype('b').tostring()).resize((853,640))
self.photo = ImageTk.PhotoImage(image=self.im)
self.canvas.delete('all')
self.canvas_image = self.canvas.create_image(0,0,image=self.photo,anchor=Tkinter.NW)
# draw crosshair
try:
x = int(self.crosshair_x.get())
except ValueError:
x = 0
try:
y = int(self.crosshair_y.get())
except ValueError:
y = 0
self.canvas.create_line(x, 0, x, y-10, fill='red', width=1)
self.canvas.create_line(x, y+10, x, 640, fill='red', width=1)
self.canvas.create_line(0, y, x-10, y, fill='red', width=1)
self.canvas.create_line(x+10, y, 853, y, fill='red', width=1)
self.config['crosshair'][0] = self.crosshair_x.get()
self.config['crosshair'][1] = self.crosshair_y.get()
with open(CONFIG_FILE, 'w') as f:
json.dump(self.config, f)
def processIncoming(self):
while self.queue.qsize():
try:
self.msg = self.queue.get(0)
if self.msg['type'] == 'image':
self.processIncomingImage(self.msg)
except Queue.Empty:
pass
class ThreadedClient:
def __init__(self, master):
self.master = master
self.queue = Queue.Queue()
self.gui = GuiPart(master, self.queue, self.endApplication)
self.running = 1
self.thread_img = threading.Thread(target=self.getRemoteImage)
self.thread_img.start()
self.periodicCall()
def periodicCall(self):
self.gui.processIncoming()
if not self.running:
sys.exit(1)
self.master.after(100, self.periodicCall)
def getRemoteImage(self):
while self.running:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
sock.connect((ASI_ADDRESS, ASI_PORT))
# send capture parameters
params = {}
params['exp_time'] = 1000*exp_time.get()
params['gain'] = gain.get()
sock.sendall(json.dumps(params))
# receive capture
arr = b''
time_start = time.time()
try:
while len(arr) < ASI_IMG_SIZE:
now = time.time()
if (now - time_start) > SOCKET_TIMEOUT:
break
data = sock.recv(2**16)
if data:
arr += data
image_array = np.frombuffer(arr, dtype=np.dtype(np.uint8)).reshape((ASI_Y, ASI_X))
sock.close()
msg = {'type':'image', 'image':image_array}
self.queue.put(msg)
except:
traceback.print_exc()
pass
def endApplication(self):
self.running = 0
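# --- Illustrative sketch of the server side of this protocol (editor addition). ---
# Purely hypothetical; the real ASI server is not part of this file. It only shows
# the wire format getRemoteImage() expects: one JSON parameter dict in, followed by
# ASI_X * ASI_Y raw 8-bit pixels back, after which the connection is closed.
def _example_fake_asi_server(host=ASI_ADDRESS, port=ASI_PORT):
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.bind((host, port))
    srv.listen(1)
    while True:
        conn, _ = srv.accept()
        params = json.loads(conn.recv(4096))  # {'exp_time': <usec>, 'gain': <int>}
        del params  # A real server would apply these settings to the camera.
        frame = (np.random.rand(ASI_Y, ASI_X) * 255).astype(np.uint8)
        conn.sendall(frame.tostring())
        conn.close()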
root = Tkinter.Tk()
exp_time = Tkinter.IntVar()
gain = Tkinter.IntVar()
client = ThreadedClient(root)
root.mainloop()
|
py | b40e5eeb9c8e4f599f6778798309339190014700 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['Checkpoint']
import glob
import os
from typing import Callable, Optional
from objax.io.ops import load_var_collection, save_var_collection
from objax.typing import FileOrStr
from objax.variable import VarCollection
class Checkpoint:
"""Helper class which performs saving and restoring of the variables.
Variables are stored in the checkpoint files. One checkpoint file stores a single snapshot of the variables.
Different checkpoint files store different snapshots of the variables (for example at different training step).
Each checkpoint has associated index, which is used to identify time when snapshot of the variables was made.
Typically training step or training epoch are used as an index.
"""
DIR_NAME: str = 'ckpt'
"""Name of the subdirectory of model directory where checkpoints will be saved."""
FILE_MATCH: str = '*.npz'
"""File pattern which is used to search for checkpoint files."""
FILE_FORMAT: str = '%010d.npz'
"""Format of the filename of one checkpoint file."""
LOAD_FN: Callable[[FileOrStr, VarCollection], None] = staticmethod(load_var_collection)
"""Load function, which loads variables collection from given file."""
SAVE_FN: Callable[[FileOrStr, VarCollection], None] = staticmethod(save_var_collection)
"""Save function, which saves variables collection into given file."""
def __init__(self, logdir: str, keep_ckpts: int, makedir: bool = True, verbose: bool = True):
"""Creates instance of the Checkpoint class.
Args:
logdir: model directory. Checkpoints will be saved in the subdirectory of model directory.
keep_ckpts: maximum number of checkpoints to keep.
makedir: if True then directory for checkpoints will be created,
otherwise it's expected that directory already exists.
verbose: if True then print when data is restored from checkpoint.
"""
self.logdir = logdir
self.keep_ckpts = keep_ckpts
self.verbose = verbose
if makedir:
os.makedirs(os.path.join(logdir, self.DIR_NAME), exist_ok=True)
@staticmethod
def checkpoint_idx(filename: str):
"""Returns index of checkpoint from given checkpoint filename.
Args:
filename: checkpoint filename.
Returns:
checkpoint index.
"""
return int(os.path.basename(filename).split('.')[0])
def restore(self, vc: VarCollection, idx: Optional[int] = None):
"""Restores values of all variables of given variables collection from the checkpoint.
Old values from the variables collection will be replaced with the new values read from checkpoint.
If variable does not exist in the variables collection, it won't be restored from checkpoint.
Args:
vc: variables collection to restore.
idx: if provided then checkpoint index to use, if None then latest checkpoint will be restored.
Returns:
idx: index of the restored checkpoint.
ckpt: full path to the restored checkpoint.
"""
if idx is None:
all_ckpts = glob.glob(os.path.join(self.logdir, self.DIR_NAME, self.FILE_MATCH))
if not all_ckpts:
return 0, ''
idx = self.checkpoint_idx(max(all_ckpts))
ckpt = os.path.join(self.logdir, self.DIR_NAME, self.FILE_FORMAT % idx)
if self.verbose:
print('Resuming from', ckpt)
self.LOAD_FN(ckpt, vc)
return idx, ckpt
def save(self, vc: VarCollection, idx: int):
"""Saves variables collection to checkpoint with given index.
Args:
vc: variables collection to save.
idx: index of the new checkpoint where variables should be saved.
"""
self.SAVE_FN(os.path.join(self.logdir, self.DIR_NAME, self.FILE_FORMAT % idx), vc)
for ckpt in sorted(glob.glob(os.path.join(self.logdir, self.DIR_NAME, self.FILE_MATCH)))[:-self.keep_ckpts]:
os.remove(ckpt)
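# --- Illustrative usage sketch (editor addition, not part of the original class). ---
# The model, directory name and epoch loop below are hypothetical; any objax
# VarCollection works the same way. `import objax` is assumed to be importable since
# this module ships inside the objax package.
def _example_checkpoint_usage():
    import objax
    model = objax.nn.Linear(3, 2)
    ckpt = Checkpoint(logdir='experiments/demo', keep_ckpts=5)
    for epoch in range(3):
        # ... a real training loop would update model.vars() here ...
        ckpt.save(model.vars(), idx=epoch)
    start_idx, path = ckpt.restore(model.vars())  # restores the latest snapshot
    return start_idx, path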
|
py | b40e5ef3f2bf0f9475815e49ee978f7e9dce80be | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Init module for TensorFlow Model Analysis addons fairness view."""
from tensorflow_model_analysis.addons.fairness.view import widget_view
|
py | b40e609e633cdbb85941f2f2e922349c081fd288 | import json
def analytics_allowed(request):
cookie_policy = request.cookies.get('ons_cookie_policy')
if cookie_policy:
return json.loads(cookie_policy.replace("'", '"'))['usage']
return False
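# --- Illustrative usage sketch (editor addition, not part of the original file). ---
# The request object below is a hypothetical stand-in for a Flask-style request; the
# cookie value mirrors the single-quoted, lowercase-boolean format that the
# replace()/json.loads() call above expects.
def _example_analytics_allowed():
    class _FakeRequest(object):
        cookies = {'ons_cookie_policy': "{'essential': true, 'usage': true}"}
    return analytics_allowed(_FakeRequest())  # -> True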
|
py | b40e61ed82489a600b225a3aece8b7b78d548e8c | # -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from operator import attrgetter
from operator import itemgetter
import re
from netaddr import IPRange
from nailgun.consts import OVS_BOND_MODES
from nailgun.db.sqlalchemy.models import Cluster
from nailgun.db.sqlalchemy.models import NetworkGroup
from nailgun.db.sqlalchemy.models import Node
from nailgun.openstack.common import jsonutils
from nailgun.orchestrator.deployment_serializers import\
create_serializer
from nailgun.orchestrator.deployment_serializers import\
DeploymentHASerializer
from nailgun.orchestrator.deployment_serializers import\
DeploymentHASerializer51
from nailgun.orchestrator.deployment_serializers import\
DeploymentMultinodeSerializer
from nailgun.orchestrator.priority_serializers import\
PriorityHASerializer50
from nailgun.orchestrator.priority_serializers import\
PriorityHASerializer51
from nailgun.orchestrator.priority_serializers import\
PriorityHASerializerPatching
from nailgun.orchestrator.priority_serializers import\
PriorityMultinodeSerializer50
from nailgun.db.sqlalchemy import models
from nailgun import objects
from nailgun.settings import settings
from nailgun.test.base import BaseIntegrationTest
from nailgun.test.base import reverse
from nailgun.volumes import manager
class OrchestratorSerializerTestBase(BaseIntegrationTest):
"""Class containts helpers."""
def filter_by_role(self, nodes, role):
return filter(lambda node: role in node['role'], nodes)
def filter_by_uid(self, nodes, uid):
return filter(lambda node: node['uid'] == uid, nodes)
def assert_nodes_with_role(self, nodes, role, count):
self.assertEqual(len(self.filter_by_role(nodes, role)), count)
def get_controllers(self, cluster_id):
return self.db.query(Node).\
filter_by(cluster_id=cluster_id,
pending_deletion=False).\
filter(Node.role_list.any(name='controller')).\
order_by(Node.id)
@property
def serializer(self):
return DeploymentHASerializer(PriorityHASerializer50())
def serialize(self, cluster):
objects.NodeCollection.prepare_for_deployment(cluster.nodes)
return self.serializer.serialize(cluster, cluster.nodes)
def _make_data_copy(self, data_to_copy):
        '''SQLAlchemy doesn't track changes on a composite attribute,
        so we need to create a fresh copy of it which will take all
        needed modifications and will be assigned as the new value
        for that attribute
        '''
return copy.deepcopy(data_to_copy)
# TODO(awoodward): multinode deprecation: probably has duplicates
class TestNovaOrchestratorSerializer(OrchestratorSerializerTestBase):
def setUp(self):
super(TestNovaOrchestratorSerializer, self).setUp()
self.cluster = self.create_env('ha_compact')
objects.Cluster.set_primary_roles(self.cluster, self.cluster.nodes)
def create_env(self, mode, network_manager='FlatDHCPManager'):
node_args = [
{'roles': ['controller', 'cinder'], 'pending_addition': True},
{'roles': ['compute', 'cinder'], 'pending_addition': True},
{'roles': ['compute'], 'pending_addition': True},
{'roles': ['mongo'], 'pending_addition': True},
{'roles': [], 'pending_roles': ['cinder'],
'pending_addition': True}]
cluster = self.env.create(
cluster_kwargs={
'mode': mode,
'net_manager': network_manager},
nodes_kwargs=node_args)
cluster_db = self.db.query(Cluster).get(cluster['id'])
objects.NodeCollection.prepare_for_deployment(cluster_db.nodes)
self.db.flush()
return cluster_db
def assert_roles_flattened(self, nodes):
self.assertEqual(len(nodes), 7)
self.assert_nodes_with_role(nodes, 'controller', 1)
self.assert_nodes_with_role(nodes, 'compute', 2)
self.assert_nodes_with_role(nodes, 'cinder', 3)
self.assert_nodes_with_role(nodes, 'mongo', 1)
def test_serialize_nodes(self):
serialized_nodes = self.serializer.serialize_nodes(self.cluster.nodes)
self.assert_roles_flattened(serialized_nodes)
        # Each node should be the same as the result of
        # the serialize_node function
for serialized_node in serialized_nodes:
node_db = self.db.query(Node).get(int(serialized_node['uid']))
expected_node = self.serializer.serialize_node(
node_db, serialized_node['role'])
self.assertEqual(serialized_node, expected_node)
def test_serialize_node(self):
node = self.env.create_node(
api=True, cluster_id=self.cluster.id, pending_addition=True)
objects.NodeCollection.prepare_for_deployment(self.cluster.nodes)
self.db.flush()
node_db = self.db.query(Node).get(node['id'])
serialized_data = self.serializer.serialize_node(node_db, 'controller')
self.assertEqual(serialized_data['role'], 'controller')
self.assertEqual(serialized_data['uid'], str(node_db.id))
self.assertEqual(serialized_data['status'], node_db.status)
self.assertEqual(serialized_data['online'], node_db.online)
self.assertEqual(serialized_data['fqdn'],
'node-%d.%s' % (node_db.id, settings.DNS_DOMAIN))
self.assertEqual(
serialized_data['glance'],
{'image_cache_max_size': manager.calc_glance_cache_size(
node_db.attributes.volumes)})
def test_node_list(self):
node_list = self.serializer.get_common_attrs(self.cluster)['nodes']
# Check right nodes count with right roles
self.assert_roles_flattened(node_list)
# Check common attrs
for node in node_list:
node_db = self.db.query(Node).get(int(node['uid']))
self.assertEqual(node['public_netmask'], '255.255.255.0')
self.assertEqual(node['internal_netmask'], '255.255.255.0')
self.assertEqual(node['storage_netmask'], '255.255.255.0')
self.assertEqual(node['uid'], str(node_db.id))
self.assertEqual(node['name'], 'node-%d' % node_db.id)
self.assertEqual(node['fqdn'], 'node-%d.%s' %
(node_db.id, settings.DNS_DOMAIN))
# Check uncommon attrs
node_uids = sorted(set([n['uid'] for n in node_list]))
man_ip = [str(ip) for ip in IPRange('192.168.0.1', '192.168.0.5')]
pub_ip = [str(ip) for ip in IPRange('172.16.0.2', '172.16.0.6')]
sto_ip = [str(ip) for ip in IPRange('192.168.1.1', '192.168.1.5')]
expected_list = [
{'roles': ['controller', 'cinder']},
{'roles': ['compute', 'cinder']},
{'roles': ['compute']},
{'roles': ['mongo']},
{'roles': ['cinder']}]
for i in range(len(expected_list)):
expected_list[i]['attrs'] = {'uid': node_uids[i]}
used_man_ip = []
used_pub_ip = []
used_sto_ip = []
for expected in expected_list:
attrs = expected['attrs']
ref_node = self.filter_by_uid(node_list, attrs['uid'])[0]
self.assertTrue(ref_node['internal_address'] in man_ip)
self.assertTrue(ref_node['public_address'] in pub_ip)
self.assertTrue(ref_node['storage_address'] in sto_ip)
self.assertFalse(ref_node['internal_address'] in used_man_ip)
self.assertFalse(ref_node['public_address'] in used_pub_ip)
self.assertFalse(ref_node['storage_address'] in used_sto_ip)
used_man_ip.append(ref_node['internal_address'])
used_pub_ip.append(ref_node['public_address'])
used_sto_ip.append(ref_node['storage_address'])
for role in expected['roles']:
nodes = self.filter_by_role(node_list, role)
node = self.filter_by_uid(nodes, attrs['uid'])[0]
self.assertEqual(node['public_address'],
ref_node['public_address'])
self.assertEqual(node['storage_address'],
ref_node['storage_address'])
self.assertEqual(node['internal_address'],
ref_node['internal_address'])
def test_flatdhcp_manager(self):
cluster = self.create_env('ha_compact')
facts = self.serializer.serialize(cluster, cluster.nodes)
for fact in facts:
self.assertEqual(
fact['novanetwork_parameters']['network_manager'],
'FlatDHCPManager')
self.assertEqual(
fact['novanetwork_parameters']['num_networks'], 1)
self.assertEqual(
fact['novanetwork_parameters']['network_size'], 65536)
def test_vlan_manager(self):
cluster = self.create_env('ha_compact')
data = {'networking_parameters': {'net_manager': 'VlanManager'}}
url = reverse('NovaNetworkConfigurationHandler',
kwargs={'cluster_id': cluster.id})
self.app.put(url, jsonutils.dumps(data),
headers=self.default_headers,
expect_errors=False)
facts = self.serializer.serialize(cluster, cluster.nodes)
for fact in facts:
self.assertEqual(fact['vlan_interface'], 'eth0')
self.assertEqual(fact['fixed_interface'], 'eth0')
self.assertEqual(
fact['novanetwork_parameters']['network_manager'],
'VlanManager')
self.assertEqual(
fact['novanetwork_parameters']['num_networks'], 1)
self.assertEqual(
fact['novanetwork_parameters']['vlan_start'], 103)
self.assertEqual(
fact['novanetwork_parameters']['network_size'], 256)
def test_floating_ranges_generation(self):
# Set ip ranges for floating ips
ranges = [['172.16.0.2', '172.16.0.4'],
['172.16.0.3', '172.16.0.5'],
['172.16.0.10', '172.16.0.12']]
self.cluster.network_config.floating_ranges = ranges
self.db.commit()
facts = self.serializer.serialize(self.cluster, self.cluster.nodes)
for fact in facts:
self.assertEqual(
fact['floating_network_range'],
['172.16.0.2-172.16.0.4',
'172.16.0.3-172.16.0.5',
'172.16.0.10-172.16.0.12'])
def test_configure_interfaces_untagged_network(self):
for network in self.db.query(NetworkGroup).all():
network.vlan_start = None
self.cluster.network_config.fixed_networks_vlan_start = None
self.db.commit()
node_db = sorted(self.cluster.nodes, key=lambda n: n.id)[0]
from nailgun.orchestrator.deployment_serializers \
import NovaNetworkDeploymentSerializer
interfaces = NovaNetworkDeploymentSerializer.\
configure_interfaces(node_db)
expected_interfaces = {
'lo': {
'interface': 'lo',
'ipaddr': ['127.0.0.1/8']
},
'eth1': {
'interface': 'eth1',
'ipaddr': ['10.20.0.129/24']
},
'eth0': {
'interface': 'eth0',
'ipaddr': ['172.16.0.2/24',
'192.168.0.1/24',
'192.168.1.1/24'],
'gateway': '172.16.0.1',
'default_gateway': True
}
}
self.datadiff(expected_interfaces, interfaces, ignore_keys=['ipaddr'])
def test_set_deployment_priorities(self):
nodes = [
{'role': 'mongo'},
{'role': 'mongo'},
{'role': 'primary-mongo'},
{'role': 'controller'},
{'role': 'ceph-osd'},
{'role': 'other'}
]
serializer = DeploymentMultinodeSerializer(
PriorityMultinodeSerializer50())
serializer.set_deployment_priorities(nodes)
expected_priorities = [
{'role': 'mongo', 'priority': 100},
{'role': 'mongo', 'priority': 200},
{'role': 'primary-mongo', 'priority': 300},
{'role': 'controller', 'priority': 400},
{'role': 'ceph-osd', 'priority': 500},
{'role': 'other', 'priority': 500}
]
self.assertEqual(expected_priorities, nodes)
    def test_set_critical_node(self):
nodes = [
{'role': 'mongo'},
{'role': 'mongo'},
{'role': 'primary-mongo'},
{'role': 'controller'},
{'role': 'ceph-osd'},
{'role': 'other'}
]
serializer = DeploymentMultinodeSerializer(
PriorityMultinodeSerializer50())
serializer.set_critical_nodes(nodes)
        expected_critical_roles = [
{'role': 'mongo', 'fail_if_error': False},
{'role': 'mongo', 'fail_if_error': False},
{'role': 'primary-mongo', 'fail_if_error': True},
{'role': 'controller', 'fail_if_error': True},
{'role': 'ceph-osd', 'fail_if_error': True},
{'role': 'other', 'fail_if_error': False}
]
        self.assertEqual(expected_critical_roles, nodes)
class TestNovaOrchestratorHASerializer(OrchestratorSerializerTestBase):
def setUp(self):
super(TestNovaOrchestratorHASerializer, self).setUp()
self.cluster = self.create_env('ha_compact')
objects.Cluster.set_primary_roles(self.cluster, self.cluster.nodes)
def create_env(self, mode):
cluster = self.env.create(
cluster_kwargs={
'mode': mode,
},
nodes_kwargs=[
{'roles': ['controller'], 'pending_addition': True},
{'roles': ['controller'], 'pending_addition': True},
{'roles': ['controller', 'cinder'], 'pending_addition': True},
{'roles': ['compute', 'cinder'], 'pending_addition': True},
{'roles': ['compute'], 'pending_addition': True},
{'roles': ['mongo'], 'pending_addition': True},
{'roles': ['cinder'], 'pending_addition': True}])
cluster_db = self.db.query(Cluster).get(cluster['id'])
objects.NodeCollection.prepare_for_deployment(cluster_db.nodes)
return cluster_db
@property
def serializer(self):
return DeploymentHASerializer(PriorityHASerializer50())
def test_set_deployment_priorities(self):
nodes = [
{'role': 'zabbix-server'},
{'role': 'primary-swift-proxy'},
{'role': 'swift-proxy'},
{'role': 'storage'},
{'role': 'mongo'},
{'role': 'primary-mongo'},
{'role': 'primary-controller'},
{'role': 'controller'},
{'role': 'controller'},
{'role': 'ceph-osd'},
{'role': 'other'}
]
self.serializer.set_deployment_priorities(nodes)
expected_priorities = [
{'role': 'zabbix-server', 'priority': 100},
{'role': 'primary-swift-proxy', 'priority': 200},
{'role': 'swift-proxy', 'priority': 300},
{'role': 'storage', 'priority': 400},
{'role': 'mongo', 'priority': 500},
{'role': 'primary-mongo', 'priority': 600},
{'role': 'primary-controller', 'priority': 700},
{'role': 'controller', 'priority': 800},
{'role': 'controller', 'priority': 900},
{'role': 'ceph-osd', 'priority': 1000},
{'role': 'other', 'priority': 1000}
]
self.assertEqual(expected_priorities, nodes)
def test_set_deployment_priorities_many_cntrls(self):
nodes = [
{'role': 'zabbix-server'},
{'role': 'primary-swift-proxy'},
{'role': 'swift-proxy'},
{'role': 'storage'},
{'role': 'mongo'},
{'role': 'primary-mongo'},
{'role': 'primary-controller'},
{'role': 'controller'},
{'role': 'controller'},
{'role': 'controller'},
{'role': 'controller'},
{'role': 'controller'},
{'role': 'controller'},
{'role': 'controller'},
{'role': 'controller'},
{'role': 'ceph-osd'},
{'role': 'other'}
]
self.serializer.set_deployment_priorities(nodes)
expected_priorities = [
{'role': 'zabbix-server', 'priority': 100},
{'role': 'primary-swift-proxy', 'priority': 200},
{'role': 'swift-proxy', 'priority': 300},
{'role': 'storage', 'priority': 400},
{'role': 'mongo', 'priority': 500},
{'role': 'primary-mongo', 'priority': 600},
{'role': 'primary-controller', 'priority': 700},
{'role': 'controller', 'priority': 800},
{'role': 'controller', 'priority': 900},
{'role': 'controller', 'priority': 1000},
{'role': 'controller', 'priority': 1100},
{'role': 'controller', 'priority': 1200},
{'role': 'controller', 'priority': 1300},
{'role': 'controller', 'priority': 1400},
{'role': 'controller', 'priority': 1500},
{'role': 'ceph-osd', 'priority': 1600},
{'role': 'other', 'priority': 1600}
]
self.assertEqual(expected_priorities, nodes)
    def test_set_critical_node(self):
nodes = [
{'role': 'zabbix-server'},
{'role': 'primary-swift-proxy'},
{'role': 'swift-proxy'},
{'role': 'storage'},
{'role': 'mongo'},
{'role': 'primary-mongo'},
{'role': 'primary-controller'},
{'role': 'controller'},
{'role': 'controller'},
{'role': 'ceph-osd'},
{'role': 'other'}
]
self.serializer.set_critical_nodes(nodes)
        expected_critical_roles = [
{'role': 'zabbix-server', 'fail_if_error': False},
{'role': 'primary-swift-proxy', 'fail_if_error': True},
{'role': 'swift-proxy', 'fail_if_error': False},
{'role': 'storage', 'fail_if_error': False},
{'role': 'mongo', 'fail_if_error': False},
{'role': 'primary-mongo', 'fail_if_error': True},
{'role': 'primary-controller', 'fail_if_error': True},
{'role': 'controller', 'fail_if_error': False},
{'role': 'controller', 'fail_if_error': False},
{'role': 'ceph-osd', 'fail_if_error': True},
{'role': 'other', 'fail_if_error': False}
]
        self.assertEqual(expected_critical_roles, nodes)
def test_set_primary_controller_priority_not_depend_on_nodes_order(self):
controllers = filter(lambda n: 'controller' in n.roles, self.env.nodes)
expected_primary_controller = sorted(
controllers, key=attrgetter('id'))[0]
reverse_sorted_controllers = sorted(
controllers, key=attrgetter('id'), reverse=True)
result_nodes = self.serializer.serialize(
self.cluster, reverse_sorted_controllers)
high_priority = sorted(result_nodes, key=itemgetter('priority'))[0]
self.assertEqual(high_priority['role'], 'primary-controller')
self.assertEqual(
int(high_priority['uid']),
expected_primary_controller.id)
def test_node_list(self):
serialized_nodes = self.serializer.node_list(self.cluster.nodes)
for node in serialized_nodes:
# Each node has swift_zone
self.assertEqual(node['swift_zone'], node['uid'])
def test_get_common_attrs(self):
attrs = self.serializer.get_common_attrs(self.cluster)
# vips
self.assertEqual(attrs['management_vip'], '192.168.0.8')
self.assertEqual(attrs['public_vip'], '172.16.0.9')
        # last_controller
controllers = self.get_controllers(self.cluster.id)
self.assertEqual(attrs['last_controller'],
'node-%d' % controllers[-1].id)
# primary_controller
controllers = self.filter_by_role(attrs['nodes'], 'primary-controller')
self.assertEqual(controllers[0]['role'], 'primary-controller')
# primary_mongo
mongo_nodes = self.filter_by_role(attrs['nodes'], 'primary-mongo')
self.assertEqual(mongo_nodes[-1]['role'], 'primary-mongo')
# mountpoints and mp attrs
self.assertEqual(
attrs['mp'],
[{'point': '1', 'weight': '1'},
{'point': '2', 'weight': '2'}])
class TestNovaOrchestratorHASerializer51(TestNovaOrchestratorHASerializer):
@property
def serializer(self):
return DeploymentHASerializer51(PriorityHASerializer51())
def test_set_deployment_priorities(self):
nodes = [
{'role': 'zabbix-server'},
{'role': 'primary-swift-proxy'},
{'role': 'swift-proxy'},
{'role': 'storage'},
{'role': 'mongo'},
{'role': 'primary-mongo'},
{'role': 'primary-controller'},
{'role': 'controller'},
{'role': 'controller'},
{'role': 'ceph-osd'},
{'role': 'other'}
]
self.serializer.set_deployment_priorities(nodes)
expected_priorities = [
{'role': 'zabbix-server', 'priority': 100},
{'role': 'primary-swift-proxy', 'priority': 200},
{'role': 'swift-proxy', 'priority': 300},
{'role': 'storage', 'priority': 400},
{'role': 'mongo', 'priority': 500},
{'role': 'primary-mongo', 'priority': 600},
{'role': 'primary-controller', 'priority': 700},
{'role': 'controller', 'priority': 800},
{'role': 'controller', 'priority': 800},
{'role': 'ceph-osd', 'priority': 900},
{'role': 'other', 'priority': 900}
]
self.assertEqual(expected_priorities, nodes)
def test_set_deployment_priorities_many_cntrls(self):
nodes = [
{'role': 'zabbix-server'},
{'role': 'primary-swift-proxy'},
{'role': 'swift-proxy'},
{'role': 'storage'},
{'role': 'mongo'},
{'role': 'primary-mongo'},
{'role': 'primary-controller'},
{'role': 'controller'},
{'role': 'controller'},
{'role': 'controller'},
{'role': 'controller'},
{'role': 'controller'},
{'role': 'controller'},
{'role': 'controller'},
{'role': 'controller'},
{'role': 'ceph-osd'},
{'role': 'other'}
]
self.serializer.set_deployment_priorities(nodes)
expected_priorities = [
{'role': 'zabbix-server', 'priority': 100},
{'role': 'primary-swift-proxy', 'priority': 200},
{'role': 'swift-proxy', 'priority': 300},
{'role': 'storage', 'priority': 400},
{'role': 'mongo', 'priority': 500},
{'role': 'primary-mongo', 'priority': 600},
{'role': 'primary-controller', 'priority': 700},
{'role': 'controller', 'priority': 800},
{'role': 'controller', 'priority': 800},
{'role': 'controller', 'priority': 800},
{'role': 'controller', 'priority': 800},
{'role': 'controller', 'priority': 800},
{'role': 'controller', 'priority': 800},
{'role': 'controller', 'priority': 900},
{'role': 'controller', 'priority': 900},
{'role': 'ceph-osd', 'priority': 1000},
{'role': 'other', 'priority': 1000}
]
self.assertEqual(expected_priorities, nodes)
class TestHASerializerPatching(TestNovaOrchestratorHASerializer):
@property
def serializer(self):
return DeploymentHASerializer(PriorityHASerializerPatching())
def test_set_deployment_priorities(self):
nodes = [
{'role': 'zabbix-server'},
{'role': 'primary-swift-proxy'},
{'role': 'swift-proxy'},
{'role': 'storage'},
{'role': 'mongo'},
{'role': 'primary-mongo'},
{'role': 'primary-controller'},
{'role': 'controller'},
{'role': 'controller'},
{'role': 'ceph-osd'},
{'role': 'other'},
{'role': 'other'},
{'role': 'other'},
]
self.serializer.set_deployment_priorities(nodes)
expected_priorities = [
{'role': 'zabbix-server', 'priority': 100},
{'role': 'primary-swift-proxy', 'priority': 200},
{'role': 'swift-proxy', 'priority': 300},
{'role': 'storage', 'priority': 400},
{'role': 'mongo', 'priority': 500},
{'role': 'primary-mongo', 'priority': 600},
{'role': 'primary-controller', 'priority': 700},
{'role': 'controller', 'priority': 800},
{'role': 'controller', 'priority': 900},
{'role': 'ceph-osd', 'priority': 1000},
{'role': 'other', 'priority': 1100},
{'role': 'other', 'priority': 1200},
{'role': 'other', 'priority': 1300},
]
self.assertEqual(expected_priorities, nodes)
def test_set_deployment_priorities_many_cntrls(self):
nodes = [
{'role': 'zabbix-server'},
{'role': 'primary-swift-proxy'},
{'role': 'swift-proxy'},
{'role': 'storage'},
{'role': 'mongo'},
{'role': 'primary-mongo'},
{'role': 'primary-controller'},
{'role': 'controller'},
{'role': 'controller'},
{'role': 'controller'},
{'role': 'controller'},
{'role': 'controller'},
{'role': 'controller'},
{'role': 'controller'},
{'role': 'controller'},
{'role': 'ceph-osd'},
{'role': 'other'}
]
self.serializer.set_deployment_priorities(nodes)
expected_priorities = [
{'role': 'zabbix-server', 'priority': 100},
{'role': 'primary-swift-proxy', 'priority': 200},
{'role': 'swift-proxy', 'priority': 300},
{'role': 'storage', 'priority': 400},
{'role': 'mongo', 'priority': 500},
{'role': 'primary-mongo', 'priority': 600},
{'role': 'primary-controller', 'priority': 700},
{'role': 'controller', 'priority': 800},
{'role': 'controller', 'priority': 900},
{'role': 'controller', 'priority': 1000},
{'role': 'controller', 'priority': 1100},
{'role': 'controller', 'priority': 1200},
{'role': 'controller', 'priority': 1300},
{'role': 'controller', 'priority': 1400},
{'role': 'controller', 'priority': 1500},
{'role': 'ceph-osd', 'priority': 1600},
{'role': 'other', 'priority': 1700}
]
self.assertEqual(expected_priorities, nodes)
# TODO(awoodward): multinode deprecation: probably has duplicates
class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
def setUp(self):
super(TestNeutronOrchestratorSerializer, self).setUp()
self.new_env_release_version = None
self.cluster = self.create_env('ha_compact')
objects.Cluster.set_primary_roles(self.cluster, self.cluster.nodes)
def create_env(self, mode, segment_type='vlan'):
release_kwargs = {}
if self.new_env_release_version:
release_kwargs['version'] = self.new_env_release_version
cluster = self.env.create(
release_kwargs=release_kwargs,
cluster_kwargs={
'mode': mode,
'net_provider': 'neutron',
'net_segment_type': segment_type
},
nodes_kwargs=[
{'roles': ['controller', 'cinder'], 'pending_addition': True},
{'roles': ['compute', 'cinder'], 'pending_addition': True},
{'roles': ['compute'], 'pending_addition': True},
{'roles': [], 'pending_roles': ['cinder'],
'pending_addition': True}])
cluster_db = self.db.query(Cluster).get(cluster['id'])
objects.NodeCollection.prepare_for_deployment(cluster_db.nodes)
return cluster_db
def serialize_env_w_version(self, version):
self.new_env_release_version = version
cluster = self.create_env(mode='ha_compact')
return create_serializer(cluster).serialize(cluster, cluster.nodes)
def assert_roles_flattened(self, nodes):
self.assertEqual(len(nodes), 6)
self.assert_nodes_with_role(nodes, 'controller', 1)
self.assert_nodes_with_role(nodes, 'compute', 2)
self.assert_nodes_with_role(nodes, 'cinder', 3)
def set_assign_public_to_all_nodes(self, cluster_db, value):
attrs = cluster_db.attributes.editable
attrs['public_network_assignment']['assign_to_all_nodes']['value'] = \
value
resp = self.app.patch(
reverse(
'ClusterAttributesHandler',
kwargs={'cluster_id': cluster_db.id}),
params=jsonutils.dumps({'editable': attrs}),
headers=self.default_headers
)
self.assertEqual(200, resp.status_code)
self.assertEqual(
attrs['public_network_assignment']['assign_to_all_nodes']['value'],
value
)
def test_serialize_nodes(self):
serialized_nodes = self.serializer.serialize_nodes(self.cluster.nodes)
self.assert_roles_flattened(serialized_nodes)
        # Each node should be the same as the result of
        # the serialize_node function
for serialized_node in serialized_nodes:
node_db = self.db.query(Node).get(int(serialized_node['uid']))
expected_node = self.serializer.serialize_node(
node_db, serialized_node['role'])
self.assertEqual(serialized_node, expected_node)
def test_neutron_vlan_ids_tag_present_on_6_0_env(self):
serialized_nodes = self.serialize_env_w_version('2014.2-6.0')
for node in serialized_nodes:
for item in node['network_scheme']['transformations']:
if 'tags' in item:
self.assertEqual(item['tags'], item['vlan_ids'])
def check_5x_60_neutron_attrs(self, version):
serialized_nodes = self.serialize_env_w_version(version)
for node in serialized_nodes:
self.assertEqual(
{
"network_type": "local",
"segment_id": None,
"router_ext": True,
"physnet": None
},
node['quantum_settings']['predefined_networks'][
'net04_ext']['L2']
)
self.assertFalse(
'physnet1' in node['quantum_settings']['L2']['phys_nets']
)
def test_serialize_neutron_attrs_on_6_0_env(self):
self.check_5x_60_neutron_attrs("2014.2-6.0")
def test_serialize_neutron_attrs_on_5_1_env(self):
self.check_5x_60_neutron_attrs("2014.1.1-5.1")
def check_50x_neutron_attrs(self, version):
serialized_nodes = self.serialize_env_w_version(version)
for node in serialized_nodes:
self.assertEqual(
{
"network_type": "flat",
"segment_id": None,
"router_ext": True,
"physnet": "physnet1"
},
node['quantum_settings']['predefined_networks'][
'net04_ext']['L2']
)
self.assertEqual(
{
"bridge": "br-ex",
"vlan_range": None
},
node['quantum_settings']['L2']['phys_nets']['physnet1']
)
def test_serialize_neutron_attrs_on_5_0_2_env(self):
self.check_50x_neutron_attrs("2014.1.1-5.0.2")
def test_serialize_neutron_attrs_on_5_0_1_env(self):
self.check_50x_neutron_attrs("2014.1.1-5.0.1")
def test_serialize_neutron_attrs_on_5_0_env(self):
self.check_50x_neutron_attrs("2014.1")
def test_serialize_node(self):
node = self.env.create_node(
api=True, cluster_id=self.cluster.id, pending_addition=True)
objects.NodeCollection.prepare_for_deployment(self.cluster.nodes)
node_db = self.db.query(Node).get(node['id'])
serialized_data = self.serializer.serialize_node(node_db, 'controller')
self.assertEqual(serialized_data['role'], 'controller')
self.assertEqual(serialized_data['uid'], str(node_db.id))
self.assertEqual(serialized_data['status'], node_db.status)
self.assertEqual(serialized_data['online'], node_db.online)
self.assertEqual(serialized_data['fqdn'],
'node-%d.%s' % (node_db.id, settings.DNS_DOMAIN))
def test_node_list(self):
assign_public_options = (False, True)
for assign in assign_public_options:
self.set_assign_public_to_all_nodes(self.cluster, assign)
# Clear IPs
for ip in self.db.query(models.IPAddr):
self.db.delete(ip)
self.db.flush()
objects.NodeCollection.prepare_for_deployment(self.cluster.nodes)
node_list = self.serializer.get_common_attrs(self.cluster)['nodes']
roles_w_public_count = 0
# Check right nodes count with right roles
self.assert_roles_flattened(node_list)
# Check common attrs
for node in node_list:
node_db = self.db.query(Node).get(int(node['uid']))
is_public = objects.Node.should_have_public(node_db)
if is_public:
self.assertEqual(node['public_netmask'], '255.255.255.0')
roles_w_public_count += 1
else:
self.assertFalse('public_netmask' in node)
self.assertEqual(node['internal_netmask'], '255.255.255.0')
self.assertEqual(node['storage_netmask'], '255.255.255.0')
self.assertEqual(node['uid'], str(node_db.id))
self.assertEqual(node['name'], 'node-%d' % node_db.id)
self.assertEqual(
node['fqdn'],
'node-%d.%s' % (node_db.id, settings.DNS_DOMAIN))
            # We have 6 roles on 4 nodes in total.
            # Only 1 node with 2 roles (controller+cinder) will have public
            # when the 'assign_to_all_nodes' option is switched off
self.assertEqual(roles_w_public_count, 6 if assign else 2)
# Check uncommon attrs
node_uids = sorted(set([n['uid'] for n in node_list]))
man_ip = [str(ip) for ip in IPRange('192.168.0.1', '192.168.0.4')]
pub_ip = [str(ip) for ip in IPRange('172.16.0.2', '172.16.0.5')]
sto_ip = [str(ip) for ip in IPRange('192.168.1.1', '192.168.1.4')]
expected_list = [
{'roles': ['controller', 'cinder']},
{'roles': ['compute', 'cinder']},
{'roles': ['compute']},
{'roles': ['cinder']}]
for i in range(len(expected_list)):
expected_list[i]['attrs'] = {'uid': node_uids[i]}
if assign:
expected_list[i]['attrs']['public_address'] = pub_ip[i]
if not assign:
expected_list[0]['attrs']['public_address'] = pub_ip[0]
            # Check that IPs are unique per node and
            # the same across all of a node's roles
used_man_ip, used_pub_ip, used_sto_ip = [], [], []
for expected in expected_list:
attrs = expected['attrs']
ref_node = self.filter_by_uid(node_list, attrs['uid'])[0]
is_public = objects.Node.should_have_public(
objects.Node.get_by_mac_or_uid(node_uid=attrs['uid']))
self.assertTrue(ref_node['internal_address'] in man_ip)
self.assertTrue(ref_node['storage_address'] in sto_ip)
self.assertFalse(ref_node['internal_address'] in used_man_ip)
self.assertFalse(ref_node['storage_address'] in used_sto_ip)
used_man_ip.append(ref_node['internal_address'])
used_sto_ip.append(ref_node['storage_address'])
                # Check if the public ip field exists
if is_public:
self.assertTrue(ref_node['public_address'] in pub_ip)
self.assertFalse(ref_node['public_address'] in used_pub_ip)
used_pub_ip.append(ref_node['public_address'])
for role in expected['roles']:
nodes = self.filter_by_role(node_list, role)
node = self.filter_by_uid(nodes, attrs['uid'])[0]
if is_public:
self.assertEqual(node['public_address'],
ref_node['public_address'])
else:
self.assertFalse('public_address' in node)
self.assertEqual(node['storage_address'],
ref_node['storage_address'])
self.assertEqual(node['internal_address'],
ref_node['internal_address'])
def test_public_serialization_for_different_roles(self):
assign_public_options = (False, True)
for assign in assign_public_options:
self.set_assign_public_to_all_nodes(self.cluster, assign)
objects.NodeCollection.prepare_for_deployment(self.cluster.nodes)
serialized_nodes = self.serializer.serialize(self.cluster,
self.cluster.nodes)
need_public_nodes_count = set()
for node in serialized_nodes:
node_db = self.db.query(Node).get(int(node['uid']))
is_public = objects.Node.should_have_public(node_db)
if is_public:
need_public_nodes_count.add(int(node['uid']))
net_man = objects.Node.get_network_manager(node_db)
self.assertEqual(
net_man._get_ip_by_network_name(
node_db, 'public') is not None,
is_public
)
for node_attrs in node['nodes']:
is_public_for_role = objects.Node.should_have_public(
objects.Node.get_by_mac_or_uid(
node_uid=int(node_attrs['uid'])))
self.assertEqual('public_address' in node_attrs,
is_public_for_role)
self.assertEqual('public_netmask' in node_attrs,
is_public_for_role)
self.assertEqual(
{
'action': 'add-br',
'name': 'br-ex'
} in node['network_scheme']['transformations'],
is_public
)
self.assertEqual(
{
'action': 'add-patch',
'bridges': ['br-eth0', 'br-ex'],
'trunks': [0]
} in node['network_scheme']['transformations'],
is_public
)
self.assertEqual(
'ex' in node['network_scheme']['roles'],
is_public
)
self.assertEqual(
'br-ex' in node['network_scheme']['endpoints'],
is_public
)
self.assertEqual(len(need_public_nodes_count), 4 if assign else 1)
def test_neutron_l3_gateway(self):
cluster = self.create_env('ha_compact', 'gre')
test_gateway = "192.168.111.255"
public_ng = self.db.query(NetworkGroup).filter(
NetworkGroup.name == 'public'
).filter(
NetworkGroup.group_id ==
objects.Cluster.get_default_group(cluster).id
).first()
public_ng.gateway = test_gateway
self.db.add(public_ng)
self.db.commit()
facts = self.serializer.serialize(cluster, cluster.nodes)
pd_nets = facts[0]["quantum_settings"]["predefined_networks"]
self.assertEqual(
pd_nets["net04_ext"]["L3"]["gateway"],
test_gateway
)
def test_gre_segmentation(self):
cluster = self.create_env('ha_compact', 'gre')
facts = self.serializer.serialize(cluster, cluster.nodes)
for fact in facts:
self.assertEqual(
fact['quantum_settings']['L2']['segmentation_type'], 'gre')
self.assertEqual(
'br-prv' in fact['network_scheme']['endpoints'], False)
self.assertEqual(
'private' in (fact['network_scheme']['roles']), False)
def _create_cluster_for_vlan_splinters(self, segment_type='gre'):
meta = {
'interfaces': [
{'name': 'eth0', 'mac': self.env.generate_random_mac()},
{'name': 'eth1', 'mac': self.env.generate_random_mac()},
{'name': 'eth2', 'mac': self.env.generate_random_mac()},
{'name': 'eth3', 'mac': self.env.generate_random_mac()},
{'name': 'eth4', 'mac': self.env.generate_random_mac()}
]
}
cluster = self.env.create(
cluster_kwargs={
'net_provider': 'neutron',
'net_segment_type': segment_type
},
nodes_kwargs=[
{'roles': ['controller'], 'pending_addition': True,
'meta': meta}
]
)
cluster_db = self.db.query(Cluster).get(cluster['id'])
objects.NodeCollection.prepare_for_deployment(cluster_db.nodes)
return cluster_db
def test_vlan_splinters_disabled(self):
cluster = self._create_cluster_for_vlan_splinters()
cluster_id = cluster.id
editable_attrs = self._make_data_copy(cluster.attributes.editable)
# Remove 'vlan_splinters' attribute and check results.
editable_attrs.pop('vlan_splinters', None)
cluster.attributes.editable = editable_attrs
self.db.commit()
cluster = self.db.query(Cluster).get(cluster_id)
self.assertNotIn('vlan_splinters', editable_attrs)
node = self.serializer.serialize(cluster, cluster.nodes)[0]
interfaces = node['network_scheme']['interfaces']
for iface_attrs in interfaces.itervalues():
self.assertIn('L2', iface_attrs)
L2_attrs = iface_attrs['L2']
self.assertIn('vlan_splinters', L2_attrs)
self.assertEqual(L2_attrs['vlan_splinters'], 'off')
self.assertNotIn('trunks', L2_attrs)
# Set 'vlan_splinters' to 'some_text' and check results.
editable_attrs = self._make_data_copy(cluster.attributes.editable)
editable_attrs['vlan_splinters'] = {'vswitch': {'value': 'some_text'}}
editable_attrs['vlan_splinters']['metadata'] = {'enabled': True}
cluster.attributes.editable = editable_attrs
self.db.commit()
cluster = self.db.query(Cluster).get(cluster_id)
editable_attrs = cluster.attributes.editable
self.assertEqual(editable_attrs['vlan_splinters']['vswitch']['value'],
'some_text')
node = self.serializer.serialize(cluster, cluster.nodes)[0]
interfaces = node['network_scheme']['interfaces']
for iface_attrs in interfaces.itervalues():
self.assertIn('L2', iface_attrs)
L2_attrs = iface_attrs['L2']
self.assertNotIn('vlan_splinters', L2_attrs)
self.assertNotIn('trunks', L2_attrs)
# Set 'vlan_splinters' to 'disabled' and check results.
editable_attrs['vlan_splinters']['metadata']['enabled'] = False
cluster.attributes.editable = editable_attrs
self.db.commit()
cluster = self.db.query(Cluster).get(cluster_id)
editable_attrs = cluster.attributes.editable
self.assertEqual(
editable_attrs['vlan_splinters']['metadata']['enabled'],
False
)
node = self.serializer.serialize(cluster, cluster.nodes)[0]
interfaces = node['network_scheme']['interfaces']
for iface_attrs in interfaces.itervalues():
self.assertIn('L2', iface_attrs)
L2_attrs = iface_attrs['L2']
self.assertIn('vlan_splinters', L2_attrs)
self.assertEqual(L2_attrs['vlan_splinters'], 'off')
self.assertNotIn('trunks', L2_attrs)
def test_kernel_lt_vlan_splinters(self):
cluster = self._create_cluster_for_vlan_splinters()
cluster_id = cluster.id
editable_attrs = self._make_data_copy(cluster.attributes.editable)
        # a value of 'kernel_lt' should end up with vlan_splinters = off
editable_attrs['vlan_splinters']['metadata']['enabled'] = True
editable_attrs['vlan_splinters']['vswitch']['value'] = 'kernel_lt'
cluster.attributes.editable = editable_attrs
self.db.commit()
cluster = self.db.query(Cluster).get(cluster_id)
editable_attrs = cluster.attributes.editable
self.assertEqual(editable_attrs['vlan_splinters']['vswitch']['value'],
'kernel_lt')
node = self.serializer.serialize(cluster, cluster.nodes)[0]
interfaces = node['network_scheme']['interfaces']
for iface_attrs in interfaces.itervalues():
self.assertIn('L2', iface_attrs)
L2_attrs = iface_attrs['L2']
self.assertIn('vlan_splinters', L2_attrs)
self.assertEqual(L2_attrs['vlan_splinters'], 'off')
self.assertNotIn('trunks', L2_attrs)
def test_hard_vlan_splinters_in_gre(self):
cluster = self._create_cluster_for_vlan_splinters('gre')
editable_attrs = self._make_data_copy(cluster.attributes.editable)
editable_attrs['vlan_splinters']['metadata']['enabled'] = True
editable_attrs['vlan_splinters']['vswitch']['value'] = 'hard'
cluster.attributes.editable = editable_attrs
self.db.commit()
vlan_set = set(
[ng.vlan_start for ng in cluster.network_groups if ng.vlan_start]
)
node = self.serializer.serialize(cluster, cluster.nodes)[0]
interfaces = node['network_scheme']['interfaces']
for iface_attrs in interfaces.itervalues():
self.assertIn('L2', iface_attrs)
L2_attrs = iface_attrs['L2']
self.assertIn('vlan_splinters', L2_attrs)
self.assertEqual(L2_attrs['vlan_splinters'], 'auto')
self.assertIn('trunks', L2_attrs)
self.assertIn(0, L2_attrs['trunks'])
map(
lambda n: vlan_set.remove(n) if n else None,
L2_attrs['trunks']
)
self.assertEqual(len(vlan_set), 0)
def test_hard_vlan_splinters_in_vlan(self):
cluster = self._create_cluster_for_vlan_splinters('vlan')
editable_attrs = self._make_data_copy(cluster.attributes.editable)
editable_attrs['vlan_splinters']['metadata']['enabled'] = True
editable_attrs['vlan_splinters']['vswitch']['value'] = 'hard'
cluster.attributes.editable = editable_attrs
self.db.commit()
vlan_set = set(
[ng.vlan_start for ng in cluster.network_groups if ng.vlan_start]
)
private_vlan_range = cluster.network_config["vlan_range"]
vlan_set.update(xrange(*private_vlan_range))
vlan_set.add(private_vlan_range[1])
node = self.serializer.serialize(cluster, cluster.nodes)[0]
interfaces = node['network_scheme']['interfaces']
for iface_attrs in interfaces.itervalues():
self.assertIn('L2', iface_attrs)
L2_attrs = iface_attrs['L2']
self.assertIn('vlan_splinters', L2_attrs)
self.assertEqual(L2_attrs['vlan_splinters'], 'auto')
self.assertIn('trunks', L2_attrs)
self.assertIn(0, L2_attrs['trunks'])
map(
lambda n: vlan_set.remove(n) if n else None,
L2_attrs['trunks']
)
self.assertEqual(len(vlan_set), 0)
def test_soft_vlan_splinters_in_vlan(self):
cluster = self._create_cluster_for_vlan_splinters('vlan')
editable_attrs = self._make_data_copy(cluster.attributes.editable)
editable_attrs['vlan_splinters']['metadata']['enabled'] = True
editable_attrs['vlan_splinters']['vswitch']['value'] = 'soft'
cluster.attributes.editable = editable_attrs
self.db.commit()
node = self.serializer.serialize(cluster, cluster.nodes)[0]
interfaces = node['network_scheme']['interfaces']
for iface_attrs in interfaces.itervalues():
self.assertIn('L2', iface_attrs)
L2_attrs = iface_attrs['L2']
self.assertIn('vlan_splinters', L2_attrs)
self.assertEqual(L2_attrs['vlan_splinters'], 'auto')
self.assertIn('trunks', L2_attrs)
self.assertEqual(L2_attrs['trunks'], [0])
class TestNeutronOrchestratorHASerializer(OrchestratorSerializerTestBase):
def setUp(self):
super(TestNeutronOrchestratorHASerializer, self).setUp()
self.cluster = self.create_env('ha_compact')
objects.Cluster.set_primary_roles(self.cluster, self.cluster.nodes)
def create_env(self, mode):
cluster = self.env.create(
cluster_kwargs={
'mode': mode,
'net_provider': 'neutron',
'net_segment_type': 'vlan'
},
nodes_kwargs=[
{'roles': ['controller'], 'pending_addition': True},
{'roles': ['controller'], 'pending_addition': True},
{'roles': ['controller', 'cinder'], 'pending_addition': True},
{'roles': ['compute', 'cinder'], 'pending_addition': True},
{'roles': ['compute'], 'pending_addition': True},
{'roles': ['cinder'], 'pending_addition': True}
]
)
cluster_db = self.db.query(Cluster).get(cluster['id'])
objects.NodeCollection.prepare_for_deployment(cluster_db.nodes)
return cluster_db
@property
def serializer(self):
return DeploymentHASerializer(PriorityHASerializer50())
def test_node_list(self):
serialized_nodes = self.serializer.node_list(self.cluster.nodes)
for node in serialized_nodes:
# Each node has swift_zone
self.assertEqual(node['swift_zone'], node['uid'])
def test_get_common_attrs(self):
attrs = self.serializer.get_common_attrs(self.cluster)
# vips
self.assertEqual(attrs['management_vip'], '192.168.0.7')
self.assertTrue(
re.compile('172.16.0.[1-9]').match(attrs['public_vip']))
        # last_controller
controllers = self.get_controllers(self.cluster.id)
self.assertEqual(attrs['last_controller'],
'node-%d' % controllers[-1].id)
# primary_controller
controllers = self.filter_by_role(attrs['nodes'], 'primary-controller')
self.assertEqual(controllers[0]['role'], 'primary-controller')
# mountpoints and mp attrs
self.assertEqual(
attrs['mp'],
[{'point': '1', 'weight': '1'},
{'point': '2', 'weight': '2'}])
class TestNeutronOrchestratorSerializerBonds(OrchestratorSerializerTestBase):
def create_env(self, nodes_count=2, nic_count=3, segment_type='vlan'):
cluster = self.env.create_cluster(
net_provider='neutron',
net_segment_type=segment_type)
self.env.create_nodes_w_interfaces_count(
nodes_count=1,
if_count=nic_count,
roles=['controller', 'cinder'],
pending_addition=True,
cluster_id=cluster['id'])
self.env.create_nodes_w_interfaces_count(
nodes_count=nodes_count - 1,
if_count=nic_count,
roles=['compute'],
pending_addition=True,
cluster_id=cluster['id'])
cluster_db = self.db.query(Cluster).get(cluster['id'])
return cluster_db
def check_add_bond_msg_lacp(self, msg):
self.assertEqual(
msg,
{
'action': 'add-bond',
'bridge': 'br-ovsbond0',
'interfaces': ['eth1', 'eth2'],
'name': 'ovsbond0',
'properties': ['lacp=active', 'bond_mode=balance-tcp']
})
def check_add_bond_msg_non_lacp(self, msg, mode):
self.assertEqual(
msg,
{
'action': 'add-bond',
'bridge': 'br-ovsbond0',
'interfaces': ['eth1', 'eth2'],
'name': 'ovsbond0',
'properties': ['bond_mode={0}'.format(mode)]
})
def check_bond_with_mode(self, mode):
cluster = self.create_env()
for node in cluster.nodes:
self.env.make_bond_via_api('ovsbond0',
mode,
['eth1', 'eth2'],
node.id)
facts = self.serialize(cluster)
for node in facts:
transforms = node['network_scheme']['transformations']
bonds = filter(lambda t: t['action'] == 'add-bond',
transforms)
self.assertEqual(len(bonds), 1)
if mode == OVS_BOND_MODES.lacp_balance_tcp:
self.check_add_bond_msg_lacp(bonds[0])
else:
self.check_add_bond_msg_non_lacp(bonds[0], mode)
def test_bonds_serialization(self):
for mode in OVS_BOND_MODES:
self.check_bond_with_mode(mode)
class TestCephOsdImageOrchestratorSerialize(OrchestratorSerializerTestBase):
def setUp(self):
super(TestCephOsdImageOrchestratorSerialize, self).setUp()
cluster = self.env.create(
cluster_kwargs={
'mode': 'multinode'},
nodes_kwargs=[
{'roles': ['controller', 'ceph-osd']}])
self.app.patch(
reverse(
'ClusterAttributesHandler',
kwargs={'cluster_id': cluster['id']}),
params=jsonutils.dumps({
'editable': {'storage': {'images_ceph': {'value': True}}}}),
headers=self.default_headers)
self.cluster = self.db.query(Cluster).get(cluster['id'])
def test_glance_image_cache_max_size(self):
data = self.serialize(self.cluster)
self.assertEqual(len(data), 2)
# one node - 2 roles
self.assertEqual(data[0]['uid'], data[1]['uid'])
self.assertEqual(data[0]['glance']['image_cache_max_size'], '0')
self.assertEqual(data[1]['glance']['image_cache_max_size'], '0')
class TestCephPgNumOrchestratorSerialize(OrchestratorSerializerTestBase):
def create_env(self, nodes, osd_pool_size='2'):
cluster = self.env.create(
cluster_kwargs={
'mode': 'multinode'},
nodes_kwargs=nodes)
self.app.patch(
reverse(
'ClusterAttributesHandler',
kwargs={'cluster_id': cluster['id']}),
params=jsonutils.dumps(
{'editable': {
'storage': {
'osd_pool_size': {'value': osd_pool_size}}}}),
headers=self.default_headers)
return self.db.query(Cluster).get(cluster['id'])
def test_pg_num_no_osd_nodes(self):
cluster = self.create_env([
{'roles': ['controller']}])
data = self.serialize(cluster)
self.assertEqual(data[0]['storage']['pg_num'], 128)
def test_pg_num_1_osd_node(self):
cluster = self.create_env([
{'roles': ['controller', 'ceph-osd']}])
data = self.serialize(cluster)
self.assertEqual(data[0]['storage']['pg_num'], 256)
def test_pg_num_1_osd_node_repl_4(self):
cluster = self.create_env(
[{'roles': ['controller', 'ceph-osd']}],
'4')
data = self.serialize(cluster)
self.assertEqual(data[0]['storage']['pg_num'], 128)
def test_pg_num_3_osd_nodes(self):
cluster = self.create_env([
{'roles': ['controller', 'ceph-osd']},
{'roles': ['compute', 'ceph-osd']},
{'roles': ['compute', 'ceph-osd']}])
data = self.serialize(cluster)
self.assertEqual(data[0]['storage']['pg_num'], 512)
class TestMongoNodesSerialization(OrchestratorSerializerTestBase):
def create_env(self):
cluster = self.env.create(
cluster_kwargs={
'mode': 'ha_compact',
'network_manager': 'FlatDHCPManager'
},
nodes_kwargs=[
{'roles': ['mongo'], 'pending_addition': True},
{'roles': ['mongo'], 'pending_addition': True},
{'roles': ['mongo'], 'pending_addition': True}
]
)
cluster = self.db.query(Cluster).get(cluster['id'])
objects.NodeCollection.prepare_for_deployment(cluster.nodes)
return cluster
@property
def serializer_ha(self):
return DeploymentHASerializer(PriorityHASerializer50())
@property
def serializer_mn(self):
return DeploymentMultinodeSerializer(PriorityMultinodeSerializer50())
    def test_mongo_roles_equals_in_different_modes(self):
cluster = self.create_env()
ha_nodes = self.serializer_ha.serialize_nodes(cluster.nodes)
mn_nodes = self.serializer_mn.serialize_nodes(cluster.nodes)
self.assertEqual(mn_nodes, ha_nodes)
def test_primary_node_selected(self):
cluster = self.create_env()
ha_nodes = self.serializer_ha.serialize_nodes(cluster.nodes)
mn_nodes = self.serializer_mn.serialize_nodes(cluster.nodes)
def primary_nodes_count(nodes):
return len(filter(lambda x: x['role'] == 'primary-mongo', nodes))
self.assertEqual(1, primary_nodes_count(ha_nodes))
self.assertEqual(1, primary_nodes_count(mn_nodes))
class TestRepoAndPuppetDataSerialization(OrchestratorSerializerTestBase):
orch_data = {
"repo_metadata": {
"nailgun":
"http://10.20.0.2:8080/centos-5.0/centos/x86_64/"
},
"puppet_modules_source":
"rsync://10.20.0.2/puppet/release/5.0/modules",
"puppet_manifests_source":
"rsync://10.20.0.2/puppet/release/5.0/manifests"
}
def test_repo_and_puppet_data_w_orch_data(self):
release_id = self.env.create_release().id
resp = self.app.put(
reverse('ReleaseHandler', kwargs={'obj_id': release_id}),
params=jsonutils.dumps(
{
"orchestrator_data": self.orch_data
}
),
headers=self.default_headers,
expect_errors=True
)
self.assertEqual(200, resp.status_code)
cluster_id = self.env.create(
cluster_kwargs={
'release_id': release_id
},
nodes_kwargs=[
{'roles': ['controller'], 'pending_addition': True}
]
)["id"]
cluster = self.db.query(Cluster).get(cluster_id)
objects.NodeCollection.prepare_for_deployment(cluster.nodes)
facts = self.serializer.serialize(cluster, cluster.nodes)
self.assertEqual(1, len(facts))
fact = facts[0]
self.assertEqual(
fact['repo_metadata'],
{
'nailgun': 'http://10.20.0.2:8080'
'/centos-5.0/centos/x86_64/'
}
)
self.assertEqual(
fact['puppet_modules_source'],
'rsync://10.20.0.2/puppet/release/5.0/modules'
)
self.assertEqual(
fact['puppet_manifests_source'],
'rsync://10.20.0.2/puppet/release/5.0/manifests'
)
def test_repo_and_puppet_data_wo_orch_data(self):
release_id = self.env.create_release().id
cluster_id = self.env.create(
cluster_kwargs={
'release_id': release_id
},
nodes_kwargs=[
{'roles': ['controller'], 'pending_addition': True}
]
)["id"]
cluster = self.db.query(Cluster).get(cluster_id)
objects.NodeCollection.prepare_for_deployment(cluster.nodes)
facts = self.serializer.serialize(cluster, cluster.nodes)
self.assertEqual(1, len(facts))
fact = facts[0]
self.assertEqual(
fact['repo_metadata'],
{
'2014.2-6.0': 'http://127.0.0.1:8080/2014.2-6.0/centos/x86_64'
}
)
self.assertEqual(
fact['puppet_modules_source'],
'rsync://127.0.0.1:/puppet/2014.2-6.0/modules/'
)
self.assertEqual(
fact['puppet_manifests_source'],
'rsync://127.0.0.1:/puppet/2014.2-6.0/manifests/'
)
def test_orch_data_w_replaced_deployment_info(self):
replaced_deployment_info = [{'repo_metadata': 'custom_stuff'}]
release = self.env.create_release()
self.env.create(
cluster_kwargs={'release_id': release.id},
nodes_kwargs=[
{'roles': ['controller'], 'pending_addition': True}
])
objects.Release.update_orchestrator_data(release, self.orch_data)
self.db.flush()
self.db.refresh(release)
self.env.nodes[0].replaced_deployment_info = replaced_deployment_info
facts = self.serializer.serialize(
self.env.clusters[0], self.env.nodes)
self.assertEqual(facts[0]['repo_metadata'],
self.orch_data['repo_metadata'])
self.assertEqual(facts[0]['puppet_modules_source'],
self.orch_data['puppet_modules_source'])
self.assertEqual(facts[0]['puppet_manifests_source'],
self.orch_data['puppet_manifests_source'])
class TestNSXOrchestratorSerializer(OrchestratorSerializerTestBase):
def setUp(self):
super(TestNSXOrchestratorSerializer, self).setUp()
self.cluster = self.create_env('ha_compact')
def create_env(self, mode, segment_type='gre'):
cluster = self.env.create(
cluster_kwargs={
'mode': mode,
'net_provider': 'neutron',
'net_segment_type': segment_type
},
nodes_kwargs=[
{'roles': ['controller'], 'pending_addition': True},
{'roles': ['compute'], 'pending_addition': True},
]
)
cluster_db = self.db.query(Cluster).get(cluster['id'])
editable_attrs = self._make_data_copy(cluster_db.attributes.editable)
nsx_attrs = editable_attrs.setdefault('nsx_plugin', {})
nsx_attrs.setdefault('metadata', {})['enabled'] = True
cluster_db.attributes.editable = editable_attrs
self.db.commit()
cluster_db = self.db.query(Cluster).get(cluster['id'])
objects.NodeCollection.prepare_for_deployment(cluster_db.nodes)
return cluster_db
def test_serialize_node(self):
serialized_data = self.serializer.serialize(self.cluster,
self.cluster.nodes)[0]
q_settings = serialized_data['quantum_settings']
self.assertIn('L2', q_settings)
self.assertIn('provider', q_settings['L2'])
self.assertEqual(q_settings['L2']['provider'], 'nsx')
l3_settings = q_settings['L3']
self.assertIn('dhcp_agent', l3_settings)
self.assertIn('enable_isolated_metadata', l3_settings['dhcp_agent'])
self.assertEqual(l3_settings['dhcp_agent']['enable_isolated_metadata'],
True)
self.assertIn('enable_metadata_network', l3_settings['dhcp_agent'])
self.assertEqual(l3_settings['dhcp_agent']['enable_metadata_network'],
True)
|
py | b40e63747e90e37223f74d599d20511e6db40305 | # test_ioa_exclusions.py
# This class tests the ioa_exclusions service class
import os
import sys
# Authentication via test_authorization.py
from tests import test_authorization as Authorization
# Import our sibling src folder into the path
sys.path.append(os.path.abspath('src'))
# Classes to test - manually imported from sibling folder
from falconpy.ioa_exclusions import IOA_Exclusions as FalconIOAE
auth = Authorization.TestAuthorization()
auth.getConfig()
falcon = FalconIOAE(creds={"client_id": auth.config["falcon_client_id"],
"client_secret": auth.config["falcon_client_secret"]
})
AllowedResponses = [200, 429] # Adding rate-limiting as an allowed response for now
class TestIOAExclusions:
def serviceIOAE_ListExclusions(self):
returned = False
if falcon.queryIOAExclusionsV1(limit=1, offset=2, pizza="IsDelicious")["status_code"] in AllowedResponses:
returned = True
return returned
def serviceIOAE_GenerateErrors(self):
falcon.base_url = "nowhere"
errorChecks = True
commandList = [
["getIOAExclusionsV1", "ids='12345678'"],
["createIOAExclusionsV1", "body={}"],
["updateIOAExclusionsV1", "body={}"],
["deleteIOAExclusionsV1", "ids='12345678'"]
]
for cmd in commandList:
if eval("falcon.{}({})['status_code']".format(cmd[0], cmd[1])) != 500:
errorChecks = False
return errorChecks
def test_Find(self):
assert self.serviceIOAE_ListExclusions() is True
def test_Errors(self):
assert self.serviceIOAE_GenerateErrors() is True
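# Illustrative way to run just this module (assumptions: pytest is the project's test
# runner and the working directory is the repository root, so the relative 'src' path
# and tests/test_authorization.py both resolve):
#   python -m pytest tests/test_ioa_exclusions.py -v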
|
py | b40e6505ae72aacf5675bd92b42f35a975356539 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
class EventLoaderTest(googletest.TestCase):
def test_log(self):
# Just check that logging works without raising an exception.
logging.error("test log message")
if __name__ == "__main__":
googletest.main()
|
py | b40e651beaca5928f6ec28378d072b56578206f2 | #!/usr/bin/env python
#
# Use the raw transactions API to spend IONs received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a iond or ion-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the ioncoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/ioncoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "ioncoin")
return os.path.expanduser("~/.ioncoin")
def read_bitcoin_config(dbdir):
"""Read the ioncoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "ioncoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a ion JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 51475 if testnet else 51473
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the iond we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(iond):
info = iond.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
iond.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = iond.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(iond):
address_summary = dict()
address_to_account = dict()
for info in iond.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = iond.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = iond.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-ion-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(iond, fromaddresses, toaddress, amount, fee):
all_coins = list_available(iond)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to iond.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = iond.createrawtransaction(inputs, outputs)
signed_rawtx = iond.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(iond, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = iond.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(iond, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = iond.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(iond, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
        if kb > 1 and (total_in - total_out) < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and (total_in - total_out) < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get IONs from")
parser.add_option("--to", dest="to", default=None,
help="address to get send IONs to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of ioncoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
iond = connect_JSON(config)
if options.amount is None:
address_summary = list_available(iond)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(iond) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(iond, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(iond, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = iond.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
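# Illustrative ioncoin.conf consumed by read_bitcoin_config() above (credentials are
# placeholders, not from the original script); rpcport may be omitted, in which case
# the 51473 (mainnet) / 51475 (testnet) defaults coded above are used:
#   rpcuser=ionrpc
#   rpcpassword=change_me
#   testnet=0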
|
py | b40e65ee3921f967b9f2aa4d5ce34470e13ff2df | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Memtester(MakefilePackage):
"""A userspace utility for testing the memory subsystem for faults."""
homepage = "http://pyropus.ca/software/memtester/"
url = "http://pyropus.ca/software/memtester/old-versions/memtester-4.3.0.tar.gz"
version('4.3.0', sha256='f9dfe2fd737c38fad6535bbab327da9a21f7ce4ea6f18c7b3339adef6bf5fd88')
version('4.2.2', sha256='a494569d58d642c796332a1b7f3b4b86845b52da66c15c96fbeecd74e48dae8e')
version('4.2.1', sha256='3433e1c757e56457610f5a97bf1a2d612c609290eba5183dd273e070134a21d2')
version('4.2.0', sha256='cb9d5437a0c429d18500bddef93084bb2fead0d5ccfedfd00ee28ff118e52695')
version('4.1.3', sha256='ac56f0b6d6d6e58bcf2a3fa7f2c9b29894f5177871f21115a1906c535106acf6')
def edit(self, spec, prefix):
makefile = FileFilter("Makefile")
makefile.filter("INSTALLPATH\t= /usr/local",
"INSTALLPATH\t= {0}".format(self.prefix))
|
py | b40e68431fb6363569da1c909a9fcdf0f6a99f43 | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
import warnings
from abc import abstractmethod
class BasePoints(object):
"""Base class for Points.
Args:
tensor (torch.Tensor | np.ndarray | list): a N x points_dim matrix.
points_dim (int): Number of the dimension of a point.
Each row is (x, y, z). Default to 3.
attribute_dims (dict): Dictionary to indicate the meaning of extra
dimension. Default to None.
Attributes:
tensor (torch.Tensor): Float matrix of N x points_dim.
points_dim (int): Integer indicating the dimension of a point.
Each row is (x, y, z, ...).
        attribute_dims (dict): Dictionary to indicate the meaning of extra
dimension. Default to None.
rotation_axis (int): Default rotation axis for points rotation.
"""
def __init__(self, tensor, points_dim=3, attribute_dims=None):
if isinstance(tensor, torch.Tensor):
device = tensor.device
else:
device = torch.device('cpu')
tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)
if tensor.numel() == 0:
# Use reshape, so we don't end up creating a new tensor that
# does not depend on the inputs (and consequently confuses jit)
tensor = tensor.reshape((0, points_dim)).to(
dtype=torch.float32, device=device)
assert tensor.dim() == 2 and tensor.size(-1) == \
points_dim, tensor.size()
self.tensor = tensor
self.points_dim = points_dim
self.attribute_dims = attribute_dims
self.rotation_axis = 0
@property
def coord(self):
"""torch.Tensor: Coordinates of each point with size (N, 3)."""
return self.tensor[:, :3]
@coord.setter
def coord(self, tensor):
"""Set the coordinates of each point."""
try:
tensor = tensor.reshape(self.shape[0], 3)
except (RuntimeError, ValueError): # for torch.Tensor and np.ndarray
raise ValueError(f'got unexpected shape {tensor.shape}')
if not isinstance(tensor, torch.Tensor):
tensor = self.tensor.new_tensor(tensor)
self.tensor[:, :3] = tensor
@property
def height(self):
"""torch.Tensor: A vector with height of each point."""
if self.attribute_dims is not None and \
'height' in self.attribute_dims.keys():
return self.tensor[:, self.attribute_dims['height']]
else:
return None
@height.setter
def height(self, tensor):
"""Set the height of each point."""
try:
tensor = tensor.reshape(self.shape[0])
except (RuntimeError, ValueError): # for torch.Tensor and np.ndarray
raise ValueError(f'got unexpected shape {tensor.shape}')
if not isinstance(tensor, torch.Tensor):
tensor = self.tensor.new_tensor(tensor)
if self.attribute_dims is not None and \
'height' in self.attribute_dims.keys():
self.tensor[:, self.attribute_dims['height']] = tensor
else:
# add height attribute
if self.attribute_dims is None:
self.attribute_dims = dict()
attr_dim = self.shape[1]
self.tensor = torch.cat([self.tensor, tensor.unsqueeze(1)], dim=1)
self.attribute_dims.update(dict(height=attr_dim))
self.points_dim += 1
@property
def color(self):
"""torch.Tensor: A vector with color of each point."""
if self.attribute_dims is not None and \
'color' in self.attribute_dims.keys():
return self.tensor[:, self.attribute_dims['color']]
else:
return None
@color.setter
def color(self, tensor):
"""Set the color of each point."""
try:
tensor = tensor.reshape(self.shape[0], 3)
except (RuntimeError, ValueError): # for torch.Tensor and np.ndarray
raise ValueError(f'got unexpected shape {tensor.shape}')
if tensor.max() >= 256 or tensor.min() < 0:
warnings.warn('point got color value beyond [0, 255]')
if not isinstance(tensor, torch.Tensor):
tensor = self.tensor.new_tensor(tensor)
if self.attribute_dims is not None and \
'color' in self.attribute_dims.keys():
self.tensor[:, self.attribute_dims['color']] = tensor
else:
# add color attribute
if self.attribute_dims is None:
self.attribute_dims = dict()
attr_dim = self.shape[1]
self.tensor = torch.cat([self.tensor, tensor], dim=1)
self.attribute_dims.update(
dict(color=[attr_dim, attr_dim + 1, attr_dim + 2]))
self.points_dim += 3
@property
def shape(self):
"""torch.Shape: Shape of points."""
return self.tensor.shape
def shuffle(self):
"""Shuffle the points.
Returns:
torch.Tensor: The shuffled index.
"""
idx = torch.randperm(self.__len__(), device=self.tensor.device)
self.tensor = self.tensor[idx]
return idx
def rotate(self, rotation, axis=None):
"""Rotate points with the given rotation matrix or angle.
Args:
rotation (float, np.ndarray, torch.Tensor): Rotation matrix
or angle.
axis (int): Axis to rotate at. Defaults to None.
"""
if not isinstance(rotation, torch.Tensor):
rotation = self.tensor.new_tensor(rotation)
assert rotation.shape == torch.Size([3, 3]) or \
rotation.numel() == 1, f'invalid rotation shape {rotation.shape}'
if axis is None:
axis = self.rotation_axis
if rotation.numel() == 1:
rot_sin = torch.sin(rotation)
rot_cos = torch.cos(rotation)
if axis == 1:
rot_mat_T = rotation.new_tensor([[rot_cos, 0, -rot_sin],
[0, 1, 0],
[rot_sin, 0, rot_cos]])
elif axis == 2 or axis == -1:
rot_mat_T = rotation.new_tensor([[rot_cos, -rot_sin, 0],
[rot_sin, rot_cos, 0],
[0, 0, 1]])
elif axis == 0:
rot_mat_T = rotation.new_tensor([[1, 0, 0],
[0, rot_cos, -rot_sin],
[0, rot_sin, rot_cos]])
else:
raise ValueError('axis should in range')
rot_mat_T = rot_mat_T.T
elif rotation.numel() == 9:
rot_mat_T = rotation
else:
raise NotImplementedError
self.tensor[:, :3] = self.tensor[:, :3] @ rot_mat_T
return rot_mat_T
@abstractmethod
def flip(self, bev_direction='horizontal'):
"""Flip the points in BEV along given BEV direction."""
pass
def translate(self, trans_vector):
"""Translate points with the given translation vector.
Args:
trans_vector (np.ndarray, torch.Tensor): Translation
vector of size 3 or nx3.
"""
if not isinstance(trans_vector, torch.Tensor):
trans_vector = self.tensor.new_tensor(trans_vector)
trans_vector = trans_vector.squeeze(0)
if trans_vector.dim() == 1:
assert trans_vector.shape[0] == 3
elif trans_vector.dim() == 2:
assert trans_vector.shape[0] == self.tensor.shape[0] and \
trans_vector.shape[1] == 3
else:
raise NotImplementedError(
f'Unsupported translation vector of shape {trans_vector.shape}'
)
self.tensor[:, :3] += trans_vector
def in_range_3d(self, point_range):
"""Check whether the points are in the given range.
Args:
point_range (list | torch.Tensor): The range of point
(x_min, y_min, z_min, x_max, y_max, z_max)
Note:
In the original implementation of SECOND, checking whether
a box in the range checks whether the points are in a convex
polygon, we try to reduce the burden for simpler cases.
Returns:
torch.Tensor: A binary vector indicating whether each point is \
inside the reference range.
"""
in_range_flags = ((self.tensor[:, 0] > point_range[0])
& (self.tensor[:, 1] > point_range[1])
& (self.tensor[:, 2] > point_range[2])
& (self.tensor[:, 0] < point_range[3])
& (self.tensor[:, 1] < point_range[4])
& (self.tensor[:, 2] < point_range[5]))
return in_range_flags
@abstractmethod
def in_range_bev(self, point_range):
"""Check whether the points are in the given range.
Args:
point_range (list | torch.Tensor): The range of point
in order of (x_min, y_min, x_max, y_max).
Returns:
torch.Tensor: Indicating whether each point is inside \
the reference range.
"""
pass
@abstractmethod
def convert_to(self, dst, rt_mat=None):
"""Convert self to ``dst`` mode.
Args:
dst (:obj:`CoordMode`): The target Box mode.
rt_mat (np.ndarray | torch.Tensor): The rotation and translation
matrix between different coordinates. Defaults to None.
The conversion from `src` coordinates to `dst` coordinates
usually comes along the change of sensors, e.g., from camera
to LiDAR. This requires a transformation matrix.
Returns:
:obj:`BasePoints`: The converted box of the same type \
in the `dst` mode.
"""
pass
def scale(self, scale_factor):
"""Scale the points with horizontal and vertical scaling factors.
Args:
scale_factors (float): Scale factors to scale the points.
"""
self.tensor[:, :3] *= scale_factor
def __getitem__(self, item):
"""
Note:
The following usage are allowed:
1. `new_points = points[3]`:
return a `Points` that contains only one point.
2. `new_points = points[2:10]`:
return a slice of points.
3. `new_points = points[vector]`:
where vector is a torch.BoolTensor with `length = len(points)`.
Nonzero elements in the vector will be selected.
4. `new_points = points[3:11, vector]`:
return a slice of points and attribute dims.
5. `new_points = points[4:12, 2]`:
return a slice of points with single attribute.
Note that the returned Points might share storage with this Points,
subject to Pytorch's indexing semantics.
Returns:
:obj:`BasePoints`: A new object of \
:class:`BasePoints` after indexing.
"""
original_type = type(self)
if isinstance(item, int):
return original_type(
self.tensor[item].view(1, -1),
points_dim=self.points_dim,
attribute_dims=self.attribute_dims)
elif isinstance(item, tuple) and len(item) == 2:
if isinstance(item[1], slice):
start = 0 if item[1].start is None else item[1].start
stop = self.tensor.shape[1] if \
item[1].stop is None else item[1].stop
step = 1 if item[1].step is None else item[1].step
item = list(item)
item[1] = list(range(start, stop, step))
item = tuple(item)
elif isinstance(item[1], int):
item = list(item)
item[1] = [item[1]]
item = tuple(item)
p = self.tensor[item[0], item[1]]
keep_dims = list(
set(item[1]).intersection(set(range(3, self.tensor.shape[1]))))
if self.attribute_dims is not None:
attribute_dims = self.attribute_dims.copy()
for key in self.attribute_dims.keys():
cur_attribute_dims = attribute_dims[key]
if isinstance(cur_attribute_dims, int):
cur_attribute_dims = [cur_attribute_dims]
intersect_attr = list(
set(cur_attribute_dims).intersection(set(keep_dims)))
if len(intersect_attr) == 1:
attribute_dims[key] = intersect_attr[0]
elif len(intersect_attr) > 1:
attribute_dims[key] = intersect_attr
else:
attribute_dims.pop(key)
else:
attribute_dims = None
elif isinstance(item, (slice, np.ndarray, torch.Tensor)):
p = self.tensor[item]
attribute_dims = self.attribute_dims
else:
raise NotImplementedError(f'Invalid slice {item}!')
assert p.dim() == 2, \
f'Indexing on Points with {item} failed to return a matrix!'
return original_type(
p, points_dim=p.shape[1], attribute_dims=attribute_dims)
def __len__(self):
"""int: Number of points in the current object."""
return self.tensor.shape[0]
def __repr__(self):
"""str: Return a strings that describes the object."""
return self.__class__.__name__ + '(\n ' + str(self.tensor) + ')'
@classmethod
def cat(cls, points_list):
"""Concatenate a list of Points into a single Points.
Args:
points_list (list[:obj:`BasePoints`]): List of points.
Returns:
:obj:`BasePoints`: The concatenated Points.
"""
assert isinstance(points_list, (list, tuple))
if len(points_list) == 0:
return cls(torch.empty(0))
assert all(isinstance(points, cls) for points in points_list)
# use torch.cat (v.s. layers.cat)
# so the returned points never share storage with input
cat_points = cls(
torch.cat([p.tensor for p in points_list], dim=0),
points_dim=points_list[0].tensor.shape[1],
attribute_dims=points_list[0].attribute_dims)
return cat_points
def to(self, device):
"""Convert current points to a specific device.
Args:
device (str | :obj:`torch.device`): The name of the device.
Returns:
:obj:`BasePoints`: A new boxes object on the \
specific device.
"""
original_type = type(self)
return original_type(
self.tensor.to(device),
points_dim=self.points_dim,
attribute_dims=self.attribute_dims)
def clone(self):
"""Clone the Points.
Returns:
:obj:`BasePoints`: Box object with the same properties \
as self.
"""
original_type = type(self)
return original_type(
self.tensor.clone(),
points_dim=self.points_dim,
attribute_dims=self.attribute_dims)
@property
def device(self):
"""str: The device of the points are on."""
return self.tensor.device
def __iter__(self):
"""Yield a point as a Tensor of shape (4,) at a time.
Returns:
torch.Tensor: A point of shape (4,).
"""
yield from self.tensor
def new_point(self, data):
"""Create a new point object with data.
        The new point and its tensor have similar properties \
        to self and self.tensor, respectively.
Args:
data (torch.Tensor | numpy.array | list): Data to be copied.
Returns:
:obj:`BasePoints`: A new point object with ``data``, \
the object's other properties are similar to ``self``.
"""
new_tensor = self.tensor.new_tensor(data) \
if not isinstance(data, torch.Tensor) else data.to(self.device)
original_type = type(self)
return original_type(
new_tensor,
points_dim=self.points_dim,
attribute_dims=self.attribute_dims)
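# Minimal usage sketch (added for illustration, not part of the original module; the
# concrete values and the extra 'height' attribute are assumptions). Concrete subclasses
# are the normal entry point, but BasePoints itself is instantiable because it does not
# use an ABC metaclass.
if __name__ == '__main__':
    _pts = BasePoints(
        np.random.rand(5, 4),  # columns: x, y, z plus one extra attribute
        points_dim=4,
        attribute_dims=dict(height=3))
    _pts.translate([1.0, 0.0, 0.0])  # shift along x
    _pts.rotate(0.1, axis=2)         # rotate around z by 0.1 rad
    print(_pts.shape, _pts.height)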
|
py | b40e687386e3f9f2f09d6494eba26494e94af8ef | def compute() -> int:
months = [31, 0, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
day = 2
result = 0
for year in range(1_901, 2_001):
        months[1] = 28 + (year % 4 == 0 and (year % 100 != 0 or year % 400 == 0))
for month in range(12):
day += months[month] % 7
if day % 7 == 0:
result += 1
return result
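# Usage sketch (added for illustration; the original module only defines compute()):
# running the file directly prints how many months in 1901-2000 started on a Sunday.
if __name__ == "__main__":
    print(compute())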
|
py | b40e68f90e8e5c3188502e20f4e33421307bd68c | """
WSGI config for {{project_name}} project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "{{project_name}}.settings")
application = get_wsgi_application()
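# Example of serving this module (illustrative; gunicorn is an assumption, any WSGI
# server that can import `application` works the same way):
#   gunicorn {{project_name}}.wsgi:application --bind 0.0.0.0:8000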
|
py | b40e6a02bff587f9f4c624e3ccfe6ba0f023c3ba | # Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange
import tensorflow as tf
from tensorforce import util, TensorForceError
from tensorforce.models import QModel
from tensorforce.core.networks import Linear
class QNAFModel(QModel):
"""
    Implements normalized advantage functions (NAF), sometimes also called
continuous Q-learning.
"""
def __init__(
self,
states,
actions,
scope,
device,
saver,
summarizer,
execution,
batching_capacity,
variable_noise,
states_preprocessing,
actions_exploration,
reward_preprocessing,
update_mode,
memory,
optimizer,
discount,
network,
distributions,
entropy_regularization,
target_sync_frequency,
target_update_weight,
double_q_model,
huber_loss
):
if any(action['type'] != 'float' or 'min_value' in action or 'max_value' in action for action in actions.values()):
raise TensorForceError("Only unconstrained float actions valid for NAFModel.")
super(QNAFModel, self).__init__(
states=states,
actions=actions,
scope=scope,
device=device,
saver=saver,
summarizer=summarizer,
execution=execution,
batching_capacity=batching_capacity,
variable_noise=variable_noise,
states_preprocessing=states_preprocessing,
actions_exploration=actions_exploration,
reward_preprocessing=reward_preprocessing,
update_mode=update_mode,
memory=memory,
optimizer=optimizer,
discount=discount,
network=network,
distributions=distributions,
entropy_regularization=entropy_regularization,
target_sync_frequency=target_sync_frequency,
target_update_weight=target_update_weight,
double_q_model=double_q_model,
huber_loss=huber_loss
)
def setup_components_and_tf_funcs(self, custom_getter=None):
super(QNAFModel, self).setup_components_and_tf_funcs(custom_getter)
self.state_values = dict()
self.l_entries = dict()
for name, action in self.actions_spec.items():
num_action = util.prod(action['shape'])
self.state_values[name] = Linear(size=num_action, scope='state-value')
self.l_entries[name] = Linear(size=(num_action * (num_action - 1) // 2), scope='l-entries')
def tf_q_value(self, embedding, distr_params, action, name):
num_action = util.prod(self.actions_spec[name]['shape'])
mean, stddev, _ = distr_params
flat_mean = tf.reshape(tensor=mean, shape=(-1, num_action))
flat_stddev = tf.reshape(tensor=stddev, shape=(-1, num_action))
# Advantage computation
# Network outputs entries of lower triangular matrix L
if self.l_entries[name] is None:
l_matrix = flat_stddev
l_matrix = tf.exp(l_matrix)
else:
l_matrix = tf.map_fn(fn=tf.diag, elems=flat_stddev)
l_entries = self.l_entries[name].apply(x=embedding)
l_entries = tf.exp(l_entries)
offset = 0
columns = list()
for zeros, size in enumerate(xrange(num_action - 1, -1, -1), 1):
column = tf.pad(tensor=l_entries[:, offset: offset + size], paddings=((0, 0), (zeros, 0)))
columns.append(column)
offset += size
l_matrix += tf.stack(values=columns, axis=1)
# P = LL^T
p_matrix = tf.matmul(a=l_matrix, b=tf.transpose(a=l_matrix, perm=(0, 2, 1)))
# A = -0.5 (a - mean)P(a - mean)
flat_action = tf.reshape(tensor=action, shape=(-1, num_action))
difference = flat_action - flat_mean
advantage = tf.matmul(a=p_matrix, b=tf.expand_dims(input=difference, axis=2))
advantage = tf.matmul(a=tf.expand_dims(input=difference, axis=1), b=advantage)
advantage = tf.squeeze(input=(-advantage / 2.0), axis=2)
# Q = A + V
# State-value function
state_value = self.state_values[name].apply(x=embedding)
q_value = state_value + advantage
return tf.reshape(tensor=q_value, shape=((-1,) + self.actions_spec[name]['shape']))
def tf_loss_per_instance(self, states, internals, actions, terminal, reward, next_states, next_internals, update, reference=None):
# Michael: doubling this function because NAF needs V'(s) not Q'(s), see comment below
embedding = self.network.apply(x=states, internals=internals, update=update)
# Both networks can use the same internals, could that be a problem?
# Otherwise need to handle internals indices correctly everywhere
target_embedding = self.target_network.apply(
x=next_states,
internals=next_internals,
update=update
)
deltas = list()
for name, distribution in self.distributions.items():
target_distribution = self.target_distributions[name]
distr_params = distribution.parameterize(x=embedding)
target_distr_params = target_distribution.parameterize(x=target_embedding)
q_value = self.tf_q_value(embedding=embedding, distr_params=distr_params, action=actions[name], name=name)
# Notice, this is V', not Q' because NAF outputs V(s) separately
next_state_value = target_distribution.state_value(distr_params=target_distr_params)
delta = self.tf_q_delta(q_value=q_value, next_q_value=next_state_value, terminal=terminal, reward=reward)
collapsed_size = util.prod(util.shape(delta)[1:])
delta = tf.reshape(tensor=delta, shape=(-1, collapsed_size))
deltas.append(delta)
# Surrogate loss as the mean squared error between actual observed rewards and expected rewards
loss_per_instance = tf.reduce_mean(input_tensor=tf.concat(values=deltas, axis=1), axis=1)
if self.huber_loss is not None and self.huber_loss > 0.0:
return tf.where(
condition=(tf.abs(x=loss_per_instance) <= self.huber_loss),
x=(0.5 * tf.square(x=loss_per_instance)),
y=(self.huber_loss * (tf.abs(x=loss_per_instance) - 0.5 * self.huber_loss))
)
else:
return tf.square(x=loss_per_instance)
def tf_regularization_losses(self, states, internals, update):
losses = super(QNAFModel, self).tf_regularization_losses(
states=states,
internals=internals,
update=update
)
for state_value in self.state_values.values():
regularization_loss = state_value.regularization_loss()
if regularization_loss is not None:
if 'state-values' in losses:
losses['state-values'] += regularization_loss
else:
losses['state-values'] = regularization_loss
for l_entries in self.l_entries.values():
regularization_loss = l_entries.regularization_loss()
if regularization_loss is not None:
if 'l-entries' in losses:
losses['l-entries'] += regularization_loss
else:
losses['l-entries'] = regularization_loss
return losses
def get_variables(self, include_submodules=False, include_nontrainable=False):
model_variables = super(QNAFModel, self).get_variables(
include_submodules=include_submodules,
include_nontrainable=include_nontrainable
)
state_values_variables = [
variable for name in sorted(self.state_values)
for variable in self.state_values[name].get_variables()
]
model_variables += state_values_variables
l_entries_variables = [
variable for name in sorted(self.l_entries)
for variable in self.l_entries[name].get_variables()
]
model_variables += l_entries_variables
return model_variables
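# Recap of the NAF parameterization implemented in tf_q_value above (comment only,
# mirroring the in-line notes rather than adding behaviour):
#   L(s)    lower-triangular matrix assembled from network outputs (diagonal exponentiated)
#   P(s)  = L(s) L(s)^T                              (positive semi-definite)
#   A(s,a) = -0.5 * (a - mu(s))^T P(s) (a - mu(s))
#   Q(s,a) = V(s) + A(s,a)
# The quadratic advantage is maximised at a = mu(s), which keeps the greedy action
# tractable for continuous action spaces.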
|
py | b40e6a428edfb25c7f962f91c4bb829e41ba8fac | from rootpy.io import File
from rootpy import asrootpy
# Most verbose log level
# import matplotlib
# matplotlib.use('AGG')
import rootpy.plotting.root2matplotlib as rplt
from rootpy import ROOTError
import matplotlib.pyplot as plt
# from matplotlib.ticker import AutoMinorLocator
# import config.summations as summations
from ROOT import TGraphAsymmErrors, TF1, TLegend, TLatex
from array import array
from config import CMS
from tools.ROOT_utils import set_root_defaults
import matplotlib.cm as cm
from matplotlib.ticker import FormatStrFormatter
import numpy
from numpy import frompyfunc
from pylab import plot
from matplotlib import rc
rc('text', usetex=True)
def make_jet_response_plot(input_file, response):
global output_folder, output_formats, suffix
jet_response_plot = asrootpy(input_file.Get(response))
x_limits = [0, 200]
y_limits = [0.8, 1.2]
if '_eta' in response:
x_limits = [-3, 3]
x_title = '$\eta$(reco jet)'
else:
x_title = '$p_{\mathrm{T}}$(reco jet) [GeV]'
y_title = '$p_{\mathrm{T}}$(reco jet)/$p_{\mathrm{T}}$(HLT jet)'
save_as_name = response
plt.figure(figsize=(20, 16), dpi=200, facecolor='white')
ax0 = plt.axes()
ax0.minorticks_on()
ax0.grid(True, 'major', linewidth=2)
ax0.grid(True, 'minor')
plt.tick_params(**CMS.axis_label_major)
plt.tick_params(**CMS.axis_label_minor)
ax0.xaxis.set_major_formatter(FormatStrFormatter('%d'))
ax0.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
ax0.xaxis.labelpad = 12
ax0.yaxis.labelpad = 12
if '_prof' in response:
rplt.errorbar(jet_response_plot, xerr=True, emptybins=True, axes=ax0, marker = 'o', ms = 15, mew=3, lw = 2)
else:
im = rplt.imshow(jet_response_plot, axes=ax0, cmap = cm.Blues)
#plt.colorbar(im)
ax0.set_xlim(x_limits)
ax0.set_ylim(y_limits)
plt.tick_params(**CMS.axis_label_major)
plt.tick_params(**CMS.axis_label_minor)
plt.xlabel(x_title, CMS.x_axis_title)
plt.ylabel(y_title, CMS.y_axis_title)
plt.title(r'e+jets, CMS Preliminary, $\sqrt{s}$ = 8 TeV', CMS.title)
if '_prof' in response:
plt.legend(['data'], numpoints=1, loc='lower right', prop=CMS.legend_properties)
plt.tight_layout()
for output_format in output_formats:
plt.savefig(output_folder + save_as_name + '_' + suffix + '.' + output_format)
def make_jet_response_comparison_plot(input_files, response):
global output_folder, output_formats, suffix
if not '_prof' in response:
print 'Can\'t make comparison scatter plots!'
return
jet_responses = {}
for jet_response_name, file_name in input_files.iteritems():
jet_responses.update({jet_response_name : asrootpy(file_name.Get(response))})
x_limits = [0, 200]
y_limits = [0.7, 1.3]
if '_eta' in response:
x_limits = [-3, 3]
x_title = '$\eta$(reco jet)'
else:
x_title = '$p_{\mathrm{T}}$(reco jet) [GeV]'
y_title = '$p_{\mathrm{T}}$(reco jet)/$p_{\mathrm{T}}$(HLT jet)'
save_as_name = response
plt.figure(figsize=(20, 16), dpi=200, facecolor='white')
ax0 = plt.axes()
ax0.minorticks_on()
ax0.grid(True, 'major', linewidth=2)
ax0.grid(True, 'minor')
plt.tick_params(**CMS.axis_label_major)
plt.tick_params(**CMS.axis_label_minor)
ax0.xaxis.set_major_formatter(FormatStrFormatter('%d'))
ax0.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
ax0.xaxis.labelpad = 12
ax0.yaxis.labelpad = 12
jet_response_name_list = []
marker_face_colours = {
0 : 'black',
1 : 'red',
2 : 'blue',
3 : 'none',
4 : 'yellow'
}
marker_edge_colours = {
0 : 'black',
1 : 'red',
2 : 'blue',
3 : 'green',
4 : 'yellow'
}
markers = {
0 : 'o',
1 : 'v',
2 : '^',
3 : 's',
4 : '*'
}
fill_styles = {
0 : 'full',
1 : 'full',
2 : 'full',
3 : 'none',
4 : 'full'
}
counter = 0
for jet_response_name, jet_response in sorted(jet_responses.iteritems()):
rplt.errorbar(jet_response, xerr=False, emptybins=True, axes=ax0, marker = markers[counter], fillstyle = fill_styles[counter],
markerfacecolor = marker_face_colours[counter], markeredgecolor = marker_edge_colours[counter], ecolor = marker_edge_colours[counter], ms=15, mew=3, lw = 2)
jet_response_name_list.append(jet_response_name)
counter += 1
ax0.set_xlim(x_limits)
ax0.set_ylim(y_limits)
plt.tick_params(**CMS.axis_label_major)
plt.tick_params(**CMS.axis_label_minor)
plt.xlabel(x_title, CMS.x_axis_title)
plt.ylabel(y_title, CMS.y_axis_title)
plt.title(r'e+jets, CMS Preliminary, $\sqrt{s}$ = 8 TeV', CMS.title)
plt.legend(jet_response_name_list, numpoints=1, loc='lower right', prop=CMS.legend_properties)
plt.tight_layout()
for output_format in output_formats:
plt.savefig(output_folder + save_as_name + '_' + suffix + '.' + output_format)
def make_single_efficiency_plot(hist_passed, hist_total, efficiency):
global output_folder, output_formats, suffix
x_limits, x_title, y_title, fit_function, fit_range = get_parameters(efficiency)
plot_efficiency = asrootpy(TGraphAsymmErrors())
plot_efficiency.Divide(hist_passed, hist_total, "cl=0.683 b(1,1) mode")
fit_data = TF1("fit_data", fit_function, fit_range[0], fit_range[1])
set_parameter_limits(efficiency, fit_data)
try:
plot_efficiency.Fit(fit_data, 'FECQ')
except ROOTError, e:
print e.msg
pass
plot_efficiency.SetMarkerSize(2)
save_as_name = efficiency
# plot with matplotlib
plt.figure(figsize=(20, 16), dpi=200, facecolor='white')
ax0 = plt.axes()
ax0.minorticks_on()
ax0.grid(True, 'major', linewidth=2)
ax0.grid(True, 'minor')
plt.tick_params(**CMS.axis_label_major)
plt.tick_params(**CMS.axis_label_minor)
ax0.xaxis.set_major_formatter(FormatStrFormatter('%d'))
ax0.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
ax0.xaxis.labelpad = 11
#ax0.yaxis.labelpad = 20
rplt.errorbar(plot_efficiency, xerr=True, emptybins=True, axes=ax0, marker = 'o', ms = 15, mew=3, lw = 2)
ax0.set_xlim(x_limits)
ax0.set_ylim([0.0,1.1])
#add fits
x = numpy.linspace(fit_data.GetXmin(), fit_data.GetXmax(), fit_data.GetNpx())
function_data = frompyfunc(fit_data.Eval, 1, 1)
plot(x, function_data(x), axes=ax0, color = 'red', linewidth = 2)
plt.tick_params(**CMS.axis_label_major)
plt.tick_params(**CMS.axis_label_minor)
plt.xlabel(x_title, CMS.x_axis_title)
plt.ylabel(y_title, CMS.y_axis_title)
plt.title(r'e+jets, CMS Preliminary, $\sqrt{s}$ = 8 TeV', CMS.title)
plt.legend(['data', 'fit'], numpoints=1, loc='lower right', prop=CMS.legend_properties)
#add fit formulas
ax0.text(0.2, 0.15, '$\epsilon$ = ' + get_fitted_function_str(fit_data, fit_function),
verticalalignment='bottom', horizontalalignment='left',
transform=ax0.transAxes,
color='red', fontsize=60, bbox = dict(facecolor = 'white', edgecolor = 'none', alpha = 0.5))
plt.tight_layout()
for output_format in output_formats:
plt.savefig(output_folder + save_as_name + '_' + suffix + '.' + output_format)
def get_parameters(trigger_under_study):
x_limits = [10, 100]
x_title = '$p_{\mathrm{T}}$(jet) [GeV]'
#y_title = '$\epsilon$'
y_title = 'Efficiency'
fit_function = ''
fit_range = [-9999, 9999]
if '_pt' in trigger_under_study:
x_limits = [20, 100]
x_title = '$p_{\mathrm{T}}$(jet) [GeV]'
fit_function = "[0]*exp([1]*exp([2]*x))"
fit_range = [27, 100]
elif '_eta' in trigger_under_study:
x_limits = [-3, 3]
x_title = '$\eta$(jet)'
fit_function = '[0]*x*x + [1]*x + [2]'
#fit_function = '[2]'
fit_range = [-3, 3]
elif '_phi' in trigger_under_study:
x_limits = [-4., 4.]
x_title = '$\phi$(jet)'
fit_function = '[0]'
fit_range = [-3.1, 3.1]
return x_limits, x_title, y_title, fit_function, fit_range
def set_plot_styles(data_plot, mc_plot):
mc_plot.SetMarkerColor(2)
mc_plot.SetMarkerStyle(22)
mc_plot.SetMarkerSize(3)
mc_plot.SetLineWidth(6)
mc_plot.SetLineColor(2)
data_plot.SetMarkerSize(3)
def set_parameter_limits(trigger_under_study, fit):
if '_pt' in trigger_under_study:
fit.SetParLimits(0, 0.0, 1.0)
fit.SetParLimits(1, -100000.0, -1.0)
fit.SetParLimits(2, -2.0, -0.01)
if '_eta' in trigger_under_study:
fit.SetParLimits(0, -0.2, 0.0)
fit.SetParLimits(1, -1.0, -1.0)
fit.SetParLimits(2, 0.2, 1.1)
def get_binning(trigger_under_study):
bin_edges = [0, 30, 35, 40, 45, 50, 70, 100]
if '_pt' in trigger_under_study:
bin_edges = [0, 30, 35, 40, 45, 50, 70, 100]
elif '_eta' in trigger_under_study:
bin_edges = [-3, -2.5, -2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2, 2.5, 3]
bin_edges = [-3, -2, -1, 0, 1, 2, 3]
bin_edges = [-3, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 3]
elif '_phi' in trigger_under_study:
bin_edges = [-3.5, -3, -2.5, -2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5]
bin_edge_array = array('d', bin_edges)
return bin_edge_array
def get_fitted_function_str(fit, fit_function):
decimals = 3
function_str = fit_function
function_str = function_str.replace('x*x', 'x^{2}')
function_str = function_str.replace('[0]', str('%.2g' % fit.GetParameter(0)))
#function_str = function_str.replace('[1]', str(round(fit.GetParameter(1), decimals)))
function_str = function_str.replace('[1]', str('%.2g' % fit.GetParameter(1)))
function_str = function_str.replace('[2]', str('%.2g' % fit.GetParameter(2)))
function_str = function_str.replace('[3]', str('%.2g' % fit.GetParameter(3)))
function_str = function_str.replace('[4]', str('%.2g' % fit.GetParameter(4)))
print function_str
function_str = function_str.replace('*', ' \\times ')
function_str = function_str.replace('0 \\times x^{2}', '')
function_str = function_str.replace('0 \\times x', '')
function_str = function_str.strip()#remove whitespace
function_str = function_str.replace('+ -', '-')
function_str = function_str.replace('- +', '-')
function_str = function_str.replace('- -', '+')
function_str = function_str.replace('+ +', '+')
function_str = function_str.replace('1 \\times', '1.0 \\times')
function_str = function_str.replace('e+0', '\\times 10^')
function_str = function_str.replace('(1\\times', '(')
function_str = function_str.replace('(-1\\times', '(-')
if function_str.startswith('+'):
function_str = function_str[1:]
if 'exp' in function_str:
function_str = function_str.replace('exp(', 'e^{\left(')
function_str = function_str.replace(')', '\\right)}')
function_str = '$' + function_str + '$'
print function_str
return function_str
def get_input_efficiency(file, efficiency_instance):
efficiency = file.Get(efficiency_instance)
hist_passed = efficiency.GetPassedHistogram()
hist_total = efficiency.GetTotalHistogram()
bin_edge_array = get_binning(efficiency_instance)
n_bins = len(bin_edge_array) - 1
#hist_passed = asrootpy(hist_passed.Rebin(n_bins, 'truth', bin_edge_array))
#hist_total = asrootpy(hist_total.Rebin(n_bins, 'truth', bin_edge_array))
hist_passed = asrootpy(hist_passed)
hist_total = asrootpy(hist_total)
return hist_passed, hist_total
if __name__ == '__main__':
set_root_defaults()
CMS.title['fontsize'] = 40
CMS.x_axis_title['fontsize'] = 50
CMS.y_axis_title['fontsize'] = 50
CMS.axis_label_major['labelsize'] = 40
CMS.axis_label_minor['labelsize'] = 40
CMS.legend_properties['size'] = 40
output_formats = ['pdf']
output_folder = './HLT_plots/'
histFile = 'hists_JEC_3rd_jet_30.root'
#histFile = 'hists_JEC_4th_jet_30.root'
#histFile = 'hists_JEC_3rd_jet_45.root'
#histFile = 'hists_uncorr_3rd_jet_30.root'
#histFile = 'hists_uncorr_4th_jet_30.root'
#histFile = 'hists_uncorr_3rd_jet_45.root'
#histFile = 'hists_JEC_PFnoPU_3rd_jet_30.root'
#histFile = 'hists_JEC_PFnoPU_4th_jet_30.root'
#histFile = 'hists_JEC_PFnoPU_3rd_jet_45.root'
suffix = 'JEC_3rd_30'
input_file = File(histFile)
efficiencyPlots = ['trigger_eff_pt', 'trigger_eff_eta']
for efficiency in efficiencyPlots:
hist_passed, hist_total = get_input_efficiency(input_file, efficiency)
make_single_efficiency_plot(hist_passed, hist_total, efficiency)
#ptRatioPlots = ['ptRatio_eta_prof'] #, 'ptRatio_pt_prof'] #, 'ptRatio_eta', 'ptRatio_pt']
#for ptRatio in ptRatioPlots:
#make_jet_response_plot(input_file, ptRatio)
#make_jet_response_comparison_plot(input_files, ptRatio)
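    # Note (illustrative): the commented-out comparison block above would additionally
    # need an `input_files` dict mapping legend labels to rootpy File objects, matching
    # the signature of make_jet_response_comparison_plot() defined earlier in this script.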
|
py | b40e6a9b32883449c2152457d1af93bfe65d838d | from flask import Flask
from flask.ext.bcrypt import Bcrypt
from flask.ext.login import LoginManager
from flask.ext.sqlalchemy import SQLAlchemy
app = Flask(__name__)
# password hashing
bcrypt = Bcrypt(app)
# manage logins
lm = LoginManager()
lm.init_app(app)
lm.login_view = 'login'
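# Flask-Login also needs a user_loader callback so `lm` can reload users from the
# session cookie; it is typically defined next to the User model (sketch only, the
# model name is an assumption):
#   @lm.user_loader
#   def load_user(user_id):
#       return models.User.query.get(int(user_id))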
# SQL
app.config.from_object('config')
db = SQLAlchemy(app)
# now load the app. Done last to avoid self-referencing
from app import views, models |
py | b40e6aafe4d782b391652b344f0535ee24e797c9 | #!/usr/bin/env python
import sys
# get all lines from stdin
for line in sys.stdin:
# remove leading and trailing whitespace
line = line.strip()
line = line.lower()
# split the line into words; splits on any whitespace
words = line.split()
#set stopwords
    stopwords = set(['the','and','i','an','a','in','t','my','of','is','s','am','or'])
# output tuples (word, 1) in tab-delimited format
for word in words:
if word not in stopwords:
print '%s\t%s' % (word, "1")
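# Illustrative Hadoop Streaming invocation for this mapper (jar path, HDFS paths and
# the companion reducer script are assumptions, not part of this file):
#   hadoop jar hadoop-streaming.jar \
#       -input /data/books -output /data/wordcount \
#       -mapper mapper.py -reducer reducer.py \
#       -file mapper.py -file reducer.py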
|
py | b40e6b52ad88976622f0534d8360475a71e54171 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Import encoders so that decorated encoders are added to registry."""
# pylint: disable=unused-import
from language.mentionmemory.encoders import bert_encoder
from language.mentionmemory.encoders import eae_encoder
from language.mentionmemory.encoders import mauto_encoder
from language.mentionmemory.encoders import mention_memory_encoder
from language.mentionmemory.encoders import readtwice_encoder
# pylint: enable=unused-import
|
py | b40e6b7e9a08a72fed6d39937325e2930ca214c9 | import wx
import os
class Window(wx.Frame):
def __init__(self, parent, title):
wx.Frame.__init__(self, parent, title = title, size = (640, 480))
self.Modify = False
self.LastSave = ""
self.replace = False
self.SetIcon(wx.Icon('note.ico', wx.BITMAP_TYPE_ICO))
self.TextField = wx.TextCtrl(self, style = wx.TE_MULTILINE | wx.TE_PROCESS_ENTER)
self.TextField.SetFocus()
self.Show(True)
#--------------------------------------------------------------------------------------------#
File = wx.Menu()
newItem = wx.MenuItem(File, wx.ID_NEW, "New", "Push the button to create new file")
File.Append(newItem)
File.AppendSeparator()
openItem = wx.MenuItem(File, wx.ID_OPEN, "Open", "Push the button to open file")
File.Append(openItem)
saveItem = wx.MenuItem(File, wx.ID_SAVE, "Save", "Push the button to save file")
File.Append(saveItem)
saveAsItem = wx.MenuItem(File, wx.ID_SAVEAS, "Save As", "Push the button to save file as")
File.Append(saveAsItem)
File.AppendSeparator()
exitItem = wx.MenuItem(File, wx.ID_EXIT, "Exit", "Push the button to leave this application")
File.Append(exitItem)
#--------------------------------------------------------------------------------------------#
Edit = wx.Menu()
undoItem = wx.MenuItem(Edit, wx.ID_UNDO, "Undo", "Push the button to return back on text")
Edit.Append(undoItem)
Edit.AppendSeparator()
        cutItem = wx.MenuItem(Edit, wx.ID_CUT, "Cut", "Push the button to cut text")
        Edit.Append(cutItem)
        copyItem = wx.MenuItem(Edit, wx.ID_COPY, "Copy", "Push the button to copy text")
        Edit.Append(copyItem)
        pasteItem = wx.MenuItem(Edit, wx.ID_PASTE, "Paste", "Push the button to paste text")
        Edit.Append(pasteItem)
        deleteItem = wx.MenuItem(Edit, wx.ID_DELETE, "Delete", "Push the button to delete text")
Edit.Append(deleteItem)
Edit.AppendSeparator()
selectAllItem = wx.MenuItem(Edit, wx.ID_SELECTALL, "Select All", "Push the button to select all the text")
Edit.Append(selectAllItem)
#--------------------------------------------------------------------------------------------#
Format = wx.Menu()
chooseColorItem = wx.MenuItem(Format, wx.ID_ANY, "Choose Color", "Push the button to choose color")
Format.Append(chooseColorItem)
Format.AppendSeparator()
chooseFontItem = wx.MenuItem(Format, wx.ID_ANY, "Choose Font", "Push the button to choose font")
Format.Append(chooseFontItem)
#--------------------------------------------------------------------------------------------#
View = wx.Menu()
        statusBarItem = wx.MenuItem(View, wx.ID_ANY, "Status Bar", "Show Status Bar")
View.Append(statusBarItem)
#--------------------------------------------------------------------------------------------#
Help = wx.Menu()
aboutItem = wx.MenuItem(Help, wx.ID_ABOUT, "About", "Push the button to get an information about this application")
Help.Append(aboutItem)
#--------------------------------------------------------------------------------------------#
MenuBar = wx.MenuBar()
MenuBar.Append(File, 'File')
MenuBar.Append(Edit, 'Edit')
MenuBar.Append(Format, 'Format')
MenuBar.Append(View, 'View')
MenuBar.Append(Help, 'Help')
self.SetMenuBar(MenuBar)
#--------------------------------------------------------------------------------------------#
self.Bind(wx.EVT_MENU, self.OnNew, newItem)
self.Bind(wx.EVT_MENU, self.OnOpen, openItem)
self.Bind(wx.EVT_MENU, self.OnSave, saveItem)
self.Bind(wx.EVT_MENU, self.OnSaveAs, saveAsItem)
self.Bind(wx.EVT_MENU, self.OnExit, exitItem)
self.Bind(wx.EVT_MENU, self.OnUndo, undoItem)
self.Bind(wx.EVT_MENU, self.OnCut, cutItem)
self.Bind(wx.EVT_MENU, self.OnCopy, copyItem)
self.Bind(wx.EVT_MENU, self.OnPaste, pasteItem)
self.Bind(wx.EVT_MENU, self.OnDelete, deleteItem)
self.Bind(wx.EVT_MENU, self.OnSelectAll, selectAllItem)
self.Bind(wx.EVT_MENU, self.OnChooseColor, chooseColorItem)
self.Bind(wx.EVT_MENU, self.OnChooseFont, chooseFontItem)
self.Bind(wx.EVT_MENU, self.OnStatusBar, statusBarItem)
self.Bind(wx.EVT_MENU, self.OnAbout, aboutItem)
self.TextField.Bind(wx.EVT_TEXT, self.OnTextChanged)
self.TextField.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
self.Bind(wx.EVT_CLOSE, self.OnExit)
self.StatusBar()
#--------------------------------------------------------------------------------------------#
def OnNew(self, event):
if self.Modify:
            QuestionDialog = wx.MessageDialog(self, 'Save before creating a new file?', '', wx.YES_NO | wx.YES_DEFAULT | wx.ICON_QUESTION)
Answer = QuestionDialog.ShowModal()
if Answer == wx.ID_YES:
self.OnSave(event)
self.TextField.SetValue("")
if not self.Modify:
self.TextField.SetValue("")
else:
self.TextField.SetValue("")
else:
self.TextField.SetValue("")
window.SetTitle("Untitled" + " - PyNote")
def OnOpen(self, event):
FName = os.path.basename(self.LastSave)
if self.Modify:
QuestionDialog = wx.MessageDialog(self, "Save changes?", "", wx.YES_NO | wx.YES_DEFAULT | wx.CANCEL | wx.ICON_QUESTION)
Answer = QuestionDialog.ShowModal()
if Answer == wx.ID_YES:
self.OnSave(event)
self.DoOpen()
elif Answer == wx.ID_CANCEL:
QuestionDialog.Destroy()
else:
self.DoOpen()
else:
self.DoOpen()
def DoOpen(self):
self.dirname = " "
openDialog = wx.FileDialog(self, "Choose a file to open", self.dirname, " ", "*.*", wx.FD_OPEN)
if openDialog.ShowModal() == wx.ID_OK:
#path = openDialog.GetPath()
try:
'''file = open(path, 'r')
text = file.read()
file.close()
self.TextField.WriteText(text)
self.LastSave = path
self.Modify = False'''
self.filename = openDialog.GetFilename()
self.dirname = openDialog.GetDirectory()
file = open(os.path.join(self.dirname, self.filename), "r")
self.TextField.SetValue(file.read())
file.close()
window.SetTitle(self.filename + " - PyNote")
            except IOError as error:
                ErrorDialog = wx.MessageDialog(self, "Error opening file\n" + str(error))
                ErrorDialog.ShowModal()
            except UnicodeDecodeError as error:
                ErrorDialog = wx.MessageDialog(self, "Error opening file\n" + str(error))
                ErrorDialog.ShowModal()
openDialog.Destroy()
def OnSave(self, event):
if self.LastSave:
try:
file = open(self.LastSave, 'w')
text = self.TextField.GetValue()
file.write(text)
file.close()
self.Modify = False
            except IOError as error:
                ErrorDialog = wx.MessageDialog(self, "Error saving file\n" + str(error))
                ErrorDialog.ShowModal()
else:
self.OnSaveAs(event)
def OnSaveAs(self, event):
saveDialog = wx.FileDialog(self, "Save file as...", " ", " ", "*.*", wx.FD_SAVE)
if saveDialog.ShowModal() == wx.ID_OK:
#path = saveDialog.GetPath()
try:
'''file = open(path, 'w')
text = self.TextField.GetValue()
file.write(text)
file.close()
self.LastSave = os.path.basename(path)
self.Modify = False'''
text = self.TextField.GetValue()
self.filename = saveDialog.GetFilename()
self.dirname = saveDialog.GetDirectory()
file = open(os.path.join(self.dirname, self.filename), 'w')
file.write(text)
file.close()
self.LastSave = os.path.basename(os.path.join(self.dirname, self.filename))
self.Modify = False
window.SetTitle(self.filename + " - PyNote")
            except Exception as error:
                ErrorDialog = wx.MessageDialog(self, "Error saving file\n" + str(error))
                ErrorDialog.ShowModal()
saveDialog.Destroy()
def OnTextChanged(self, event):
self.Modify = True
event.Skip()
def OnExit(self, event):
if self.Modify:
QuestionDialog = wx.MessageDialog(self, 'Save before Exit?', '', wx.YES_NO | wx.YES_DEFAULT | wx.CANCEL | wx.ICON_QUESTION)
Answer = QuestionDialog.ShowModal()
if Answer == wx.ID_YES:
self.OnSave(event)
if not self.Modify:
wx.Exit()
elif Answer == wx.ID_CANCEL:
QuestionDialog.Destroy()
else:
self.Destroy()
else:
self.Destroy()
#--------------------------------------------------------------------------------------------#
    def OnUndo(self, event):
        self.TextField.Undo()
def OnCut(self, event):
self.TextField.Cut()
def OnCopy(self, event):
self.TextField.Copy()
def OnPaste(self, event):
self.TextField.Paste()
def OnDelete(self, event):
textRemove, to = self.TextField.GetSelection()
self.TextField.Remove(textRemove, to)
def OnSelectAll(self, event):
self.TextField.SelectAll()
#--------------------------------------------------------------------------------------------#
def OnChooseColor(self, event):
pass
def OnChooseFont(self, event):
pass
#--------------------------------------------------------------------------------------------#
def OnStatusBar(self, event):
if self.statusbar.IsShown():
self.statusbar.Hide()
else:
self.statusbar.Show()
def StatusBar(self):
self.statusbar = self.CreateStatusBar()
self.statusbar.SetFieldsCount(3)
self.statusbar.SetStatusWidths([-5, -2, -1])
#--------------------------------------------------------------------------------------------#
def OnAbout(self, event):
aboutDialog = wx.MessageDialog(self, "PyNote\t\n Version:\t\t 1.19.08 \nDenis Ostrovsky 2017", "About PyNote", wx.OK)
aboutDialog.ShowModal()
#--------------------------------------------------------------------------------------------#
def OnKeyDown(self, event):
keyCode = event.GetKeyCode()
if keyCode == wx.WXK_INSERT:
if not self.replace:
self.replace = True
else:
self.replace = False
event.Skip()
app = wx.App()
window = Window(None, "Untitled" + " - PyNote")
app.MainLoop() |
py | b40e6c2978e0ce58bea1ee5137c6090305812c23 | import numpy as np
from scipy.linalg import sqrtm
import math
import time
import datetime
from util_functions import vectorize, matrixize
from Recommendation import Recommendation
from BaseAlg import BaseAlg
import warnings
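# CoLinUCBUserSharedStruct below keeps one set of sufficient statistics shared by all
# users: a (d*userNum x d*userNum) design matrix A and reward vector b, the fixed
# collaboration matrix W, and CoTheta = UserTheta.dot(W), which mixes per-user estimates
# through the collaboration weights.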
class CoLinUCBUserSharedStruct(object):
def __init__(self, featureDimension, lambda_, userNum, W):
self.currentW = np.identity(n = userNum)
self.W = W
print "W: ", self.W
self.userNum = userNum
self.A = lambda_*np.identity(n = featureDimension*userNum)
self.b = np.zeros(featureDimension*userNum)
self.AInv = np.linalg.inv(self.A)
self.UserTheta = np.zeros(shape = (featureDimension, userNum))
self.CoTheta = np.zeros(shape = (featureDimension, userNum))
self.BigW = np.kron(np.transpose(W), np.identity(n=featureDimension))
print "Big W: ", self.BigW
self.CCA = np.dot(np.dot(self.BigW , self.AInv), np.transpose(self.BigW))
self.alpha_t = 0.0
self.sigma = 1.e-200 #Used in the high probability bound, i.e, with probability at least (1 - sigma) the confidence bound. So sigma should be very small
self.lambda_ = lambda_
def updateParameters(self, articlePicked, click, userID, update='Inv'):
X = vectorize(np.outer(articlePicked.contextFeatureVector, self.W.T[userID]))
#print "X: " + str(X)
change = np.outer(X, X)
self.A += change
self.b += click*X
if update == 'Inv':
self.AInv = np.linalg.inv(self.A)
else:
			# Sherman-Morrison rank-one update of A^{-1}, using change = X X^T computed above
			self.AInv = self.AInv - np.dot(self.AInv, np.dot(change, self.AInv))/(1.0+np.dot(np.transpose(X), np.dot(self.AInv, X)))
self.UserTheta = matrixize(np.dot(self.AInv, self.b), len(articlePicked.contextFeatureVector))
self.CoTheta = np.dot(self.UserTheta, self.W)
self.CCA = np.dot(np.dot(self.BigW , self.AInv), np.transpose(self.BigW))
def getProb(self, alpha, article, userID):
warnings.filterwarnings('error')
TempFeatureM = np.zeros(shape =(len(article.contextFeatureVector), self.userNum))
TempFeatureM.T[userID] = article.contextFeatureVector
TempFeatureV = vectorize(TempFeatureM)
mean = np.dot(self.CoTheta.T[userID], article.contextFeatureVector)
var = np.sqrt(np.dot(np.dot(TempFeatureV, self.CCA), TempFeatureV))
#self.alpha_t = 0.01*np.sqrt(np.log(np.linalg.det(self.A)/float(self.sigma * self.lambda_) )) + np.sqrt(self.lambda_)
try:
self.alpha_t = 0.01*np.sqrt(np.log(np.linalg.det(self.A)/float(self.sigma * self.lambda_) )) + np.sqrt(self.lambda_)
except:
self.alpha_t = 0.0
		#pta = mean + alpha * var # use empirically tuned alpha
pta = mean + self.alpha_t *var # use the theoretically computed alpha_t
return pta
def getUserCoTheta(self, userID):
return self.CoTheta.T[userID]
def getCCA(self):
return self.CCA
def calculateAlphaT(self):
warnings.filterwarnings('error')
try:
self.alpha_t = 0.01*np.sqrt(np.log(np.linalg.det(self.A)/float(self.sigma * self.lambda_) )) + np.sqrt(self.lambda_)
except:
self.alpha_t = 0.0
return self.alpha_t
#---------------CoLinUCB(fixed user order) algorithms: Asynchronous version and Synchronous version
class CoLinUCBAlgorithm(BaseAlg):
def __init__(self, arg_dict, update='inv'): # n is number of users
BaseAlg.__init__(self, arg_dict)
self.update = update #default is inverse. Could be 'rankone' instead.
self.USERS = CoLinUCBUserSharedStruct(arg_dict['dimension'], arg_dict['lambda_'], arg_dict['n_users'], arg_dict['W'])
def decide_old(self, pool_articles, userID, exclude = []):
maxPTA = float('-inf')
articlePicked = None
for x in pool_articles:
x_pta = self.USERS.getProb(self.alpha, x, userID)
# pick article with highest Prob
if maxPTA < x_pta:
articlePicked = x
maxPTA = x_pta
return [articlePicked]
def decide(self, pool_articles, userID, k = 1):
# MEAN
art_features = np.empty([len(pool_articles), len(pool_articles[0].contextFeatureVector)])
for i in range(len(pool_articles)):
art_features[i, :] = pool_articles[i].contextFeatureVector
user_features = self.USERS.CoTheta.T[userID]
mean_matrix = np.dot(art_features, user_features)
# VARIANCE
art_temp_features = np.empty([len(pool_articles), len(pool_articles[0].contextFeatureVector)*self.n_users])
for i in range(len(pool_articles)):
TempFeatureM = np.zeros(shape =(len(pool_articles[0].contextFeatureVector), self.n_users))
TempFeatureM.T[userID] = pool_articles[i].contextFeatureVector
art_temp_features[i, :] = vectorize(TempFeatureM)
var_matrix = np.sqrt(np.dot(np.dot(art_temp_features, self.USERS.CCA), art_temp_features.T))
#self.USERS.calculateAlphaT()
if self.use_alpha_t:
self.USERS.calculateAlphaT()
pta_matrix = mean_matrix + self.USERS.alpha_t*np.diag(var_matrix)
else:
pta_matrix = mean_matrix + self.alpha*np.diag(var_matrix)
pool_positions = np.argsort(pta_matrix)[(k*-1):]
articles = []
for i in range(k):
articles.append(pool_articles[pool_positions[i]])
return articles
#return pool_articles[pool_position]
def updateParameters(self, articlePicked, click, userID, update='Inv'):
self.USERS.updateParameters(articlePicked, click, userID, update)
def getLearntParameters(self, userID):
return self.USERS.UserTheta.T[userID]
def getTheta(self, userID):
return self.USERS.UserTheta.T[userID]
def getCoTheta(self, userID):
return self.USERS.CoTheta.T[userID]
def getA(self):
return self.USERS.A
def getW(self, userID):
return self.USERS.W
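# --- Hypothetical usage sketch (not part of the original module) ---
# Shows the expected call pattern of CoLinUCBUserSharedStruct with a stand-in
# article object; the real experiment driver and Article class live elsewhere.
if __name__ == '__main__':
	class _DemoArticle(object):
		def __init__(self, vec):
			self.contextFeatureVector = vec
	d, n = 5, 3
	W_demo = np.full((n, n), 1.0 / n)        # uniform collaboration weights across 3 users
	shared = CoLinUCBUserSharedStruct(d, 0.1, n, W_demo)
	art = _DemoArticle(np.random.rand(d))
	ucb_score = shared.getProb(0.3, art, 0)  # UCB score for user 0 before any feedback
	shared.updateParameters(art, 1.0, 0)     # observe a reward of 1.0 and update A, b, CoTheta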
|
py | b40e6d2ef8f49034342c74a312419189000c267e | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import optparse
import subprocess
import sys
import threading
import time
from git_common import GIT_EXE, GIT_TRANSIENT_ERRORS_RE
class TeeThread(threading.Thread):
def __init__(self, fd, out_fd, name):
super(TeeThread, self).__init__(name='git-retry.tee.%s' % (name,))
self.data = None
self.fd = fd
self.out_fd = out_fd
def run(self):
chunks = []
for line in self.fd:
chunks.append(line)
self.out_fd.write(line)
self.data = ''.join(chunks)
class GitRetry(object):
logger = logging.getLogger('git-retry')
DEFAULT_DELAY_SECS = 3.0
DEFAULT_RETRY_COUNT = 5
def __init__(self, retry_count=None, delay=None, delay_factor=None):
self.retry_count = retry_count or self.DEFAULT_RETRY_COUNT
self.delay = max(delay, 0) if delay else 0
self.delay_factor = max(delay_factor, 0) if delay_factor else 0
def shouldRetry(self, stderr):
m = GIT_TRANSIENT_ERRORS_RE.search(stderr)
if not m:
return False
self.logger.info("Encountered known transient error: [%s]",
stderr[m.start(): m.end()])
return True
@staticmethod
def execute(*args):
args = (GIT_EXE,) + args
proc = subprocess.Popen(
args,
stderr=subprocess.PIPE,
)
stderr_tee = TeeThread(proc.stderr, sys.stderr, 'stderr')
# Start our process. Collect/tee 'stdout' and 'stderr'.
stderr_tee.start()
try:
proc.wait()
except KeyboardInterrupt:
proc.kill()
raise
finally:
stderr_tee.join()
return proc.returncode, None, stderr_tee.data
def computeDelay(self, iteration):
"""Returns: the delay (in seconds) for a given iteration
The first iteration has a delay of '0'.
Args:
iteration: (int) The iteration index (starting with zero as the first
iteration)
"""
if (not self.delay) or (iteration == 0):
return 0
if self.delay_factor == 0:
# Linear delay
return iteration * self.delay
# Exponential delay
return (self.delay_factor ** (iteration - 1)) * self.delay
def __call__(self, *args):
returncode = 0
for i in xrange(self.retry_count):
# If the previous run failed and a delay is configured, delay before the
# next run.
delay = self.computeDelay(i)
if delay > 0:
self.logger.info("Delaying for [%s second(s)] until next retry", delay)
time.sleep(delay)
self.logger.debug("Executing subprocess (%d/%d) with arguments: %s",
(i+1), self.retry_count, args)
returncode, _, stderr = self.execute(*args)
self.logger.debug("Process terminated with return code: %d", returncode)
if returncode == 0:
break
if not self.shouldRetry(stderr):
self.logger.error("Process failure was not known to be transient; "
"terminating with return code %d", returncode)
break
return returncode
def main(args):
parser = optparse.OptionParser()
parser.disable_interspersed_args()
parser.add_option('-v', '--verbose',
action='count', default=0,
help="Increase verbosity; can be specified multiple times")
parser.add_option('-c', '--retry-count', metavar='COUNT',
type=int, default=GitRetry.DEFAULT_RETRY_COUNT,
help="Number of times to retry (default=%default)")
parser.add_option('-d', '--delay', metavar='SECONDS',
type=float, default=GitRetry.DEFAULT_DELAY_SECS,
help="Specifies the amount of time (in seconds) to wait "
"between successive retries (default=%default). This "
"can be zero.")
parser.add_option('-D', '--delay-factor', metavar='FACTOR',
type=int, default=2,
help="The exponential factor to apply to delays in between "
"successive failures (default=%default). If this is "
"zero, delays will increase linearly. Set this to "
"one to have a constant (non-increasing) delay.")
opts, args = parser.parse_args(args)
# Configure logging verbosity
if opts.verbose == 0:
logging.getLogger().setLevel(logging.WARNING)
elif opts.verbose == 1:
logging.getLogger().setLevel(logging.INFO)
else:
logging.getLogger().setLevel(logging.DEBUG)
# Execute retries
retry = GitRetry(
retry_count=opts.retry_count,
delay=opts.delay,
delay_factor=opts.delay_factor,
)
return retry(*args)
if __name__ == '__main__':
logging.basicConfig()
logging.getLogger().setLevel(logging.WARNING)
sys.exit(main(sys.argv[2:]))
|
py | b40e6efb14cb3440c3862e4f4dacb83c885286da | from dash import dcc, html
from dash.dependencies import Input, Output
from app import app
layout = html.Div([
dcc.RangeSlider(
id='my-range-slider',
min=0,
max=20,
step=0.5,
value=[5, 15]
),
html.Div(id='output-container-range-slider')
])
@app.callback(
Output('output-container-range-slider', 'children'),
[Input('my-range-slider', 'value')])
def update_output(value):
    return f'You selected "{value}"'
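# Note: this module follows the multi-page Dash pattern - `app` is created in a
# separate app.py and an entry script registers this `layout`; it is not meant
# to be run directly.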
|
py | b40e6fb41edfd1b0bd8ecd7aea31e21f815d0f8a | from pyfirmata import Arduino
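# The serial port path is machine-specific: '/dev/cu.usbmodem*' on macOS,
# '/dev/ttyACM0' or '/dev/ttyUSB0' on Linux, and 'COM3'-style names on Windows.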
PORT = '/dev/cu.usbmodem1421'
DELAY = .5
board = Arduino(PORT)
def blink(pin):
board.digital[pin].write(1)
board.pass_time(DELAY)
board.digital[pin].write(0)
board.pass_time(DELAY)
while True:
blink(13)
blink(11)
blink(9)
blink(7)
|
py | b40e7000a75080d12628b9fedd036b2faf7ed781 | # Copyright (C) 2018 Verizon. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rest_framework import serializers
class OperateVnfRequestSerializer(serializers.Serializer):
changeStateTo = serializers.ChoiceField(
help_text="The desired operational state (i.e. started or stopped) to change the VNF to.",
choices=["STARTED", "STOPPED"],
required=True)
stopType = serializers.ChoiceField(
help_text="It signals whether forceful or graceful stop is requested.",
choices=["FORCEFUL", "GRACEFUL"],
required=False)
gracefulStopTimeout = serializers.IntegerField(
help_text="The time interval to wait for the VNF to be taken out of service during graceful stop.",
required=False)
additionalParams = serializers.DictField(
help_text="Additional input parameters for the operate process, \
specific to the VNF being operated, \
as declared in the VNFD as part of OperateVnfOpConfig.",
child=serializers.CharField(help_text="KeyValue Pairs", allow_blank=True),
required=False,
allow_null=True)
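# Hypothetical usage sketch (not part of the original module): validating an
# incoming "operate VNF" request body before acting on it.
#
#     serializer = OperateVnfRequestSerializer(data={"changeStateTo": "STOPPED",
#                                                    "stopType": "GRACEFUL",
#                                                    "gracefulStopTimeout": 60})
#     serializer.is_valid(raise_exception=True)
#     params = serializer.validated_data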
|
py | b40e70347d4b3e258db69d3d775ab68d0bb8ce6c | ''' Modifying and extending Named Tuples
Named tuples are immutable.
    How can we change values inside the tuple?
    Just like with strings, we have to create a new tuple, with the modified values.
        Point2D = namedtuple('Point2D', 'x y')
        pt = Point2D(0, 0)
        pt.x = 100                       # will not work - the fields are read-only
    Suppose we need to change the value of the x coordinate:
    Simple approach: pt = Point2D(100, pt.y)
    Note: the memory address of pt has now changed.
    This approach can work well, but it has a major drawback once there are many fields:
        Stock = namedtuple('Stock', 'symbol year month day open high low close')
        djia = Stock('DJIA', 2021, 7, 23, 26313, 26458, 26260, 26393)
    Maybe slicing or unpacking?
        current = djia[:7]               # current -> ('DJIA', 2021, 7, 23, 26313, 26458, 26260)
        *current, _ = djia               # current -> ['DJIA', 2021, 7, 23, 26313, 26458, 26260]
        djia = Stock(*current, 26394)
    I can also use the _make class method - but we need to create an iterable that contains all the values first:
        new_values = current + (26394,)  # tuple case; for the list case use current + [26394]
                                         # (current.append(26394) returns None, so don't assign it)
        new_values -> ('DJIA', 2021, 7, 23, 26313, 26458, 26260, 26394)
        djia = Stock._make(new_values)
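    A complete worked example of the above (illustrative values):
        from collections import namedtuple
        Stock = namedtuple('Stock', 'symbol year month day open high low close')
        djia = Stock('DJIA', 2021, 7, 23, 26313, 26458, 26260, 26393)
        *current, _ = djia                    # drop the old close price
        djia = Stock._make(current + [26394])
        print(djia.close)                     # 26394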
''' |
py | b40e703c490cd2b9812938404bd364348d492214 | # Lint as: python3
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests `utils.py`."""
import csv
import functools
import os
import tempfile
import unittest
import mock
import pandas as pd
from pandas import testing as pdt
import tensorflow as tf
from tfrecorder import beam_image
from tfrecorder import constants
from tfrecorder import dataset_loader
from tfrecorder import input_schema
from tfrecorder import test_utils
from tfrecorder import utils
# pylint: disable=protected-access
class CheckTFRecordsTest(unittest.TestCase):
"""Tests `check_tfrecords`."""
def setUp(self):
"""Test setup."""
image_height = 40
image_width = 30
image_channels = 3
image_fn = functools.partial(
test_utils.make_random_image, image_height, image_width,
image_channels)
data = test_utils.get_test_data()
schema = input_schema.IMAGE_CSV_SCHEMA
image_uri_key = schema.image_uri_key
num_records = len(data[image_uri_key])
image_uris = data.pop(image_uri_key)
data['image_name'] = [os.path.split(uri)[-1] for uri in image_uris]
data.update({
'image': [beam_image.encode(image_fn())
for _ in range(num_records)],
'image_height': [image_height] * num_records,
'image_width': [image_width] * num_records,
'image_channels': [image_channels] * num_records,
})
self.tfrecord_dir = 'gs://path/to/tfrecords/dir'
self.split = 'train'
self.num_records = num_records
self.data = data
self.dataset = tf.data.Dataset.from_tensor_slices(self.data)
@mock.patch.object(dataset_loader, 'load', autospec=True)
def test_valid_records(self, mock_fn):
"""Tests valid case on reading multiple records."""
mock_fn.return_value = {self.split: self.dataset}
num_records = len(self.data['image'])
with tempfile.TemporaryDirectory() as dir_:
actual_dir = utils.inspect(
self.tfrecord_dir, split=self.split, num_records=num_records,
output_dir=dir_)
self.assertTrue('check-tfrecords-' in actual_dir)
actual_csv = os.path.join(actual_dir, 'data.csv')
self.assertTrue(os.path.exists(actual_csv))
_ = self.data.pop('image')
# Check output CSV
actual_df = pd.read_csv(actual_csv)
exp_df = pd.DataFrame(self.data)
pdt.assert_frame_equal(actual_df, exp_df)
# Check output images
actual_image_files = [
f for f in os.listdir(actual_dir) if f.endswith('.jpg')]
expected_image_files = self.data['image_name']
self.assertCountEqual(actual_image_files, expected_image_files)
@mock.patch.object(dataset_loader, 'load', autospec=True)
def test_no_data_for_split(self, mock_fn):
"""Check exception raised when data could not be loaded given `split`."""
mock_fn.return_value = {}
with self.assertRaisesRegex(ValueError, 'Could not load data for'):
utils.inspect(self.tfrecord_dir, split='UNSUPPORTED')
class CopyLogTest(unittest.TestCase):
"""Misc tests for _copy_logfile_to_gcs."""
def test_valid_copy(self):
"""Test valid file copy."""
with tempfile.TemporaryDirectory() as tmpdirname:
text = 'log test log test'
infile = os.path.join(tmpdirname, 'foo.log')
with open(infile, 'w') as f:
f.write(text)
utils.copy_logfile_to_gcs(infile, tmpdirname)
outfile = os.path.join(tmpdirname, constants.LOGFILE)
with open(outfile, 'r') as f:
data = f.read()
self.assertEqual(text, data)
def test_invalid_copy(self):
"""Test invalid file copy."""
with tempfile.TemporaryDirectory() as tmpdirname:
infile = os.path.join(tmpdirname, 'foo.txt')
with self.assertRaises(FileNotFoundError):
utils.copy_logfile_to_gcs(infile, tmpdirname)
class PathSplitTest(unittest.TestCase):
"""Tests `_path_split`."""
def test_local_and_gcs(self):
"""Tests both local and GCS paths."""
filename = 'image_file.jpg'
dirpaths = ['/path/to/image/dir/', 'gs://path/to/image/dir/']
for dir_ in dirpaths:
filepath = os.path.join(dir_, filename)
act_dirpath, act_filename = utils._path_split(filepath)
self.assertEqual(act_dirpath, dir_.rsplit('/', 1)[0])
self.assertEqual(act_filename, filename)
class ReadImageDirectoryTest(unittest.TestCase):
"""Tests `read_image_directory`."""
def setUp(self):
self.image_data = test_utils.get_test_df()
self.tempfiles = []
self.tempdir = None
self.schema = input_schema.Schema(
input_schema.IMAGE_CSV_SCHEMA.input_schema_map)
def tearDown(self):
for fp in self.tempfiles:
fp.close()
self.tempdir.cleanup()
def test_normal(self):
"""Tests conversion of expected directory structure on local machine."""
g = self.image_data.groupby([self.schema.split_key, self.schema.label_key])
self.tempdir = tempfile.TemporaryDirectory()
rows = []
for (split, label), indices in g.groups.items():
dir_ = os.path.join(self.tempdir.name, split, label)
os.makedirs(dir_)
for f in list(self.image_data.loc[indices, self.schema.image_uri_key]):
_, name = os.path.split(f)
fp = tempfile.NamedTemporaryFile(
dir=dir_, suffix='.jpg', prefix=name)
self.tempfiles.append(fp)
rows.append([split, fp.name, label])
columns = list(input_schema.IMAGE_CSV_SCHEMA.get_input_keys())
actual = utils.read_image_directory(self.tempdir.name)
actual.sort_values(by=columns, inplace=True)
actual.reset_index(drop=True, inplace=True)
expected = pd.DataFrame(rows, columns=columns)
expected.sort_values(by=columns, inplace=True)
expected.reset_index(drop=True, inplace=True)
pd.testing.assert_frame_equal(actual, expected)
class CreateImageCSVTest(unittest.TestCase):
"""Tests `create_image_csv`."""
@mock.patch.object(utils, 'read_image_directory', autospec=True)
def test_normal(self, mock_fn):
"""Tests normal case."""
exp_df = test_utils.get_test_df()
mock_fn.return_value = exp_df
image_dir = 'path/to/image/dir'
with tempfile.NamedTemporaryFile(
mode='w+', prefix='image', suffix='.csv') as f:
utils.create_image_csv(image_dir, f.name)
# CSV should not have an index column
reader = csv.reader(f, delimiter=',')
row = next(reader)
exp_columns = exp_df.columns
self.assertEqual(len(row), len(exp_columns))
# CSV should match input data
actual = pd.read_csv(f.name, names=exp_columns)
pd.testing.assert_frame_equal(actual, exp_df)
if __name__ == '__main__':
  unittest.main()
 |