ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3 to 1.04M) |
---|---|---|
py | b411a8a8a30a0a4f4dadfb90cce271e6c6f1d519 | # Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for detection_3d_lib."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from lingvo import compat as tf
from lingvo.core import test_utils
from lingvo.tasks.car import detection_3d_lib
import numpy as np
class Utils3DTest(test_utils.TestCase):
def testScaledHuberLoss(self):
utils_3d = detection_3d_lib.Utils3D()
labels = tf.constant([1, 2, 3], dtype=tf.float32)
# Predictions are less than delta, exactly at delta, and more than delta,
# respectively.
predictions = tf.constant([1.4, 1.2, 4.0], dtype=tf.float32)
delta = 0.8
expected_loss = [
1. / delta * 0.5 * (0.4)**2,
0.5 * delta,
1.0 - 0.5 * delta,
]
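# The expected values above are consistent with a Huber loss whose quadratic
# region is scaled by 1/delta, roughly:
#   loss(err) = 0.5 * err**2 / delta   if |err| <= delta
#   loss(err) = |err| - 0.5 * delta    otherwise
# (inferred from the three cases in this test, not from the implementation).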
loss = utils_3d.ScaledHuberLoss(labels, predictions, delta=delta)
with self.session() as sess:
actual_loss = sess.run(loss)
self.assertAllClose(actual_loss, expected_loss)
def testCornerLoss(self):
utils_3d = detection_3d_lib.Utils3D()
gt_bboxes = tf.constant([[[[0., 0., 0., 1., 1., 1., 0.],
[0., 0., 0., 1., 1., 1., 0.],
[0., 0., 0., 1., 1., 1., 0.],
[0., 0., 0., 1., 1., 1., 0.],
[0., 0., 0., 1., 1., 1., 0.]]]])
predicted_bboxes = tf.constant([[[
[0., 0., 0., 1., 1., 1., 0.], # Same as GT
[0., 0., 0., 1., 1., 1., np.pi], # Opposite heading
[0., 0., 0., 1., 1., 1., np.pi / 2.], # 90-deg rotation
[1., 1., 1., 1., 1., 1., 0], # Different center
[0., 0., 0., 2., 2., 2., 0], # Different size
]]])
expected_loss = [[[
0.,
0.,
7.5,
8. * np.sqrt(3.) - 0.5,
8. * np.sqrt(0.75) - 0.5,
]]]
loss = utils_3d.CornerLoss(gt_bboxes, predicted_bboxes)
with self.session() as sess:
actual_loss = sess.run(loss)
self.assertAllClose(actual_loss, expected_loss)
def testCreateDenseCoordinates(self):
utils_3d = detection_3d_lib.Utils3D()
one_dim = utils_3d.CreateDenseCoordinates([(0.5, 1.5, 3)])
with self.session() as sess:
actual_one_dim = sess.run(one_dim)
self.assertAllEqual(actual_one_dim, [[0.5], [1.0], [1.5]])
two_by_two = utils_3d.CreateDenseCoordinates([(0, 1, 2), (1, 2, 2)])
with self.session() as sess:
actual_two_by_two = sess.run(two_by_two)
self.assertAllEqual(actual_two_by_two, [[0, 1], [0, 2], [1, 1], [1, 2]])
three_dims = utils_3d.CreateDenseCoordinates([(0, 1, 5), (1, 2, 5),
(0, 10, 5)])
self.assertAllEqual(three_dims.shape, [5 * 5 * 5, 3])
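# From the assertions above, CreateDenseCoordinates appears to treat each
# (start, stop, num) tuple as a linspace and to return the Cartesian product of
# those ranges, with shape [prod(num), len(ranges)].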
def testMakeAnchorBoxesWithoutRotation(self):
utils_3d = detection_3d_lib.Utils3D()
anchor_bboxes = utils_3d.MakeAnchorBoxes(
anchor_centers=tf.constant([[0, 0, 0], [1, 1, 1]], dtype=tf.float32),
anchor_box_dimensions=tf.constant([[1, 2, 3], [3, 4, 5]],
dtype=tf.float32),
anchor_box_offsets=tf.constant([[0, 0, 0], [1, 1, 1]],
dtype=tf.float32),
anchor_box_rotations=None)
with self.session() as sess:
actual_anchor_bboxes = sess.run(anchor_bboxes)
self.assertAllEqual(actual_anchor_bboxes,
[[[0, 0, 0, 1, 2, 3, 0], [1, 1, 1, 3, 4, 5, 0]],
[[1, 1, 1, 1, 2, 3, 0], [2, 2, 2, 3, 4, 5, 0]]])
def testMakeAnchorBoxesWithRotation(self):
utils_3d = detection_3d_lib.Utils3D()
anchor_bboxes = utils_3d.MakeAnchorBoxes(
anchor_centers=tf.constant([[0, 0, 0], [1, 1, 1]], dtype=tf.float32),
anchor_box_dimensions=tf.constant([[1, 2, 3], [3, 4, 5]],
dtype=tf.float32),
anchor_box_offsets=tf.constant([[0, 0, 0], [1, 1, 1]],
dtype=tf.float32),
anchor_box_rotations=tf.constant([0, 0.5]))
with self.session() as sess:
actual_anchor_bboxes = sess.run(anchor_bboxes)
self.assertAllEqual(actual_anchor_bboxes,
[[[0, 0, 0, 1, 2, 3, 0], [1, 1, 1, 3, 4, 5, 0.5]],
[[1, 1, 1, 1, 2, 3, 0], [2, 2, 2, 3, 4, 5, 0.5]]])
def testAssignAnchors(self):
utils_3d = detection_3d_lib.Utils3D()
anchor_bboxes = tf.constant(
[
[0, 1, 1, 2, 2, 2, 0], # Ignored
[-1, 1, 1, 2, 2, 2, 0], # Background
[0.9, 1, 1, 2, 2, 2, 0], # Foreground
[5, 5, 5, 1, 1, 2, 0], # Force matched to foreground
],
dtype=tf.float32)
# Second gt box should be forced match, third one should be ignored.
gt_bboxes = tf.constant([[1, 1, 1, 2, 2, 2, 0], [5, 5, 5, 2, 2, 2, 0],
[10, 10, 10, 2, 2, 2, 0]],
dtype=tf.float32)
gt_bboxes_labels = tf.constant([1, 2, 3])
gt_bboxes_mask = tf.constant([1, 1, 1])
assigned_anchors = utils_3d.AssignAnchors(
anchor_bboxes,
gt_bboxes,
gt_bboxes_labels,
gt_bboxes_mask,
foreground_assignment_threshold=0.5,
background_assignment_threshold=0.25)
with self.session() as sess:
actual_assigned_anchors, gt_bboxes = sess.run((assigned_anchors,
gt_bboxes))
self.assertAllEqual(actual_assigned_anchors.assigned_gt_labels,
[0, 0, 1, 2])
self.assertAllEqual(actual_assigned_anchors.assigned_gt_bbox, [
[0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 1, 1, 1, 0],
[1, 1, 1, 2, 2, 2, 0],
[5, 5, 5, 2, 2, 2, 0],
])
self.assertAllEqual(actual_assigned_anchors.assigned_cls_mask,
[0, 1, 1, 1])
self.assertAllEqual(actual_assigned_anchors.assigned_reg_mask,
[0, 0, 1, 1])
self.assertAllEqual(
actual_assigned_anchors.assigned_gt_similarity_score.shape, [4])
def testAssignAnchorsWithoutForceMatch(self):
utils_3d = detection_3d_lib.Utils3D()
anchor_bboxes = tf.constant(
[
[0, 1, 1, 2, 2, 2, 0], # Ignored
[-1, 1, 1, 2, 2, 2, 0], # Background
[0.9, 1, 1, 2, 2, 2, 0], # Foreground
[5, 5, 5, 1, 1, 2, 0], # Background, since no force match
],
dtype=tf.float32)
# Without force matching, the second gt box is left unmatched; the third one
# should be ignored.
gt_bboxes = tf.constant([[1, 1, 1, 2, 2, 2, 0], [5, 5, 5, 2, 2, 2, 0],
[10, 10, 10, 2, 2, 2, 0]],
dtype=tf.float32)
gt_bboxes_labels = tf.constant([1, 2, 3])
gt_bboxes_mask = tf.constant([1, 1, 1])
assigned_anchors = utils_3d.AssignAnchors(
anchor_bboxes,
gt_bboxes,
gt_bboxes_labels,
gt_bboxes_mask,
foreground_assignment_threshold=0.5,
background_assignment_threshold=0.25,
force_match=False)
with self.session() as sess:
actual_assigned_anchors, gt_bboxes = sess.run((assigned_anchors,
gt_bboxes))
self.assertAllEqual(actual_assigned_anchors.assigned_gt_labels,
[0, 0, 1, 0])
self.assertAllEqual(actual_assigned_anchors.assigned_gt_bbox, [
[0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 1, 1, 1, 0],
[1, 1, 1, 2, 2, 2, 0],
[0, 0, 0, 1, 1, 1, 0],
])
self.assertAllEqual(actual_assigned_anchors.assigned_cls_mask,
[0, 1, 1, 1])
self.assertAllEqual(actual_assigned_anchors.assigned_reg_mask,
[0, 0, 1, 0])
self.assertAllEqual(
actual_assigned_anchors.assigned_gt_similarity_score.shape, [4])
def testAssignAnchorsWithPadding(self):
utils_3d = detection_3d_lib.Utils3D()
anchor_bboxes = tf.constant([[0, 0, 0, 1, 2, 3, 0], [1, 1, 1, 3, 4, 5, 0.5],
[1, 1, 1, 1, 2, 3, 0], [2, 2, 2, 3, 4, 5,
0.5]])
gt_bboxes = anchor_bboxes + 0.05
gt_bboxes_labels = tf.constant([1, 2, 3, 4])
gt_bboxes_mask = tf.constant([1, 1, 0, 0])
assigned_anchors = utils_3d.AssignAnchors(anchor_bboxes, gt_bboxes,
gt_bboxes_labels, gt_bboxes_mask)
with self.session() as sess:
actual_assigned_anchors, gt_bboxes = sess.run((assigned_anchors,
gt_bboxes))
# Last two boxes are padded, thus not assigned.
self.assertAllEqual(actual_assigned_anchors.assigned_gt_labels,
[1, 2, 0, 0])
self.assertAllEqual(actual_assigned_anchors.assigned_gt_bbox[0:2, :],
gt_bboxes[0:2, :])
# 2nd and 3rd should match dummy bbox.
self.assertAllEqual(actual_assigned_anchors.assigned_gt_bbox[2, :],
[0, 0, 0, 1, 1, 1, 0])
self.assertAllEqual(actual_assigned_anchors.assigned_gt_bbox[3, :],
[0, 0, 0, 1, 1, 1, 0])
# First two are foreground, last two are background.
self.assertAllEqual(actual_assigned_anchors.assigned_cls_mask,
[1, 1, 1, 1])
self.assertAllEqual(actual_assigned_anchors.assigned_reg_mask,
[1, 1, 0, 0])
self.assertAllEqual(
actual_assigned_anchors.assigned_gt_similarity_score.shape, [4])
def testLocalizationResiduals(self):
utils_3d = detection_3d_lib.Utils3D()
anchor_bboxes = tf.constant([[1, 2, 3, 4, 3, 6, 0]], dtype=tf.float32)
gt_bboxes = tf.constant([[2, 22, 303, 4, 9, 12, 0.5]], dtype=tf.float32)
# diagonal_xy = 5 [since sqrt(3^2 + 4^2) = 5]
expected_residuals = np.asarray([[
1. / 5,
20. / 5,
300. / 6,
0.,
np.log(9. / 3.),
np.log(12. / 6.),
0.5,
]])
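# Residual encoding implied by the expected values: the x/y center deltas are
# normalized by the anchor's xy diagonal, the z delta by the anchor height,
# the dimensions are log-ratios, and the heading residual is a plain difference
# (inferred from this test's numbers, not from LocalizationResiduals itself).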
residuals = utils_3d.LocalizationResiduals(anchor_bboxes, gt_bboxes)
with self.session() as sess:
actual_residuals = sess.run(residuals)
self.assertAllClose(actual_residuals, expected_residuals)
def testResidualsToBBoxes(self):
utils_3d = detection_3d_lib.Utils3D()
anchor_bboxes = tf.constant([[1, 2, 3, 4, 3, 6, 0]], dtype=tf.float32)
expected_predicted_bboxes = np.asarray([[2, 22, 303, 4, 9, 12, 0.5]])
residuals = tf.constant([[
1. / 5, 20. / 5, 300. / 6, 0.,
np.log(9. / 3.),
np.log(12. / 6.),
0.5,
]], dtype=tf.float32) # pyformat: disable
predicted_bboxes = utils_3d.ResidualsToBBoxes(anchor_bboxes, residuals)
with self.session() as sess:
actual_predicted_bboxes = sess.run(predicted_bboxes)
self.assertAllClose(actual_predicted_bboxes, expected_predicted_bboxes)
def testZeroResiduals(self):
utils_3d = detection_3d_lib.Utils3D()
anchor_bboxes = tf.constant([[1, 2, 3, 4, 3, 6, 0]], dtype=tf.float32)
expected_predicted_bboxes = np.asarray([[1, 2, 3, 4, 3, 6, 0]])
residuals = tf.zeros((1, 7))
predicted_bboxes = utils_3d.ResidualsToBBoxes(anchor_bboxes, residuals)
with self.session() as sess:
actual_predicted_bboxes = sess.run(predicted_bboxes)
self.assertAllClose(actual_predicted_bboxes, expected_predicted_bboxes)
def testResidualsToBBoxPhiFloorMod(self):
utils_3d = detection_3d_lib.Utils3D()
anchor_bboxes = tf.constant([[1, 2, 3, 4, 3, 6, np.pi]], dtype=tf.float32)
# We expect the returned phi value to be floormod'd w.r.t. pi.
expected_predicted_bboxes = np.asarray([[1, 2, 3, 4, 3, 6, 1.]])
residuals = tf.constant([[0, 0, 0, 0, 0, 0, 1.0]], dtype=tf.float32)
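# With an anchor phi of pi and a residual of 1.0, the raw prediction is
# pi + 1.0, and floormod(pi + 1.0, pi) = 1.0, matching the expected bbox above.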
predicted_bboxes = utils_3d.ResidualsToBBoxes(anchor_bboxes, residuals)
with self.session() as sess:
actual_predicted_bboxes = sess.run(predicted_bboxes)
self.assertAllClose(actual_predicted_bboxes, expected_predicted_bboxes)
def testNMSIndices(self):
utils_3d = detection_3d_lib.Utils3D()
# Create three anchor boxes, two largely overlapping and one
# not overlapping with either.
#
# Set a batch size of 1 and use the Batched version to test
# both functions.
anchor_bboxes = tf.constant(
[[[1, 2, 3, 4, 3, 6, 0.], [1, 2, 2, 4, 3, 6, 0.],
[10, 20, 30, 4, 3, 6, 0.]]],
dtype=tf.float32)
# Treat them all as high scores.
scores = tf.constant([[0.7, 0.8, 0.6]])
with self.session() as sess:
nms_indices, valid_mask = utils_3d.BatchedNMSIndices(
anchor_bboxes, scores)
indices, mask = sess.run([nms_indices, valid_mask])
# One box is filtered out.
self.assertEqual(2, np.sum(mask))
# The two boxes that remain are the second one (because of its higher
# score) and the last one (which overlaps with nothing).
self.assertAllEqual([[1, 2, 0]], indices)
# Flip the scores; expect the first box to be chosen instead.
# Change the last box's threshold to be 0.0, so that the
# default setting for the score threshold filters it out too.
scores_2 = tf.constant([[0.8, 0.7, 0.0]])
nms_indices, valid_mask = utils_3d.BatchedNMSIndices(
anchor_bboxes, scores_2)
indices, mask = sess.run([nms_indices, valid_mask])
self.assertEqual(1, np.sum(mask))
self.assertAllEqual([[0, 0, 0]], indices)
def testOrientedNMSIndices(self):
utils_3d = detection_3d_lib.Utils3D()
# Assignments and IoU scores calculated offline.
bboxes_data = tf.constant(
[[
[10.35, 8.429, -1.003, 3.7, 1.64, 1.49, 1.582],
[10.35, 8.429, -1.003, 3.7, 1.64, 1.49, 0.0], # box 0 rotated
[11.5, 8.429, -1.003, 3.7, 1.64, 1.49, 1.0], # Rotated to overlap
[13.01, 8.149, -0.953, 4.02, 1.55, 1.52, 1.592],
[13.51, 8.39, -1.0, 4.02, 1.55, 1.52, 1.592], # Slight translation
[13.51, 8.39, -1.0, 1.0, 1.0, 1.52, 1.592], # Smaller box
[13.51, 8.39, -1.0, 1.0, 1.0, 1.52, 1.9], # Smaller box
]],
dtype=tf.float32)
# Notes on the data:
# Let's say we have 3 classes and a threshold of 0.1
# Keep box [0, 3] for class 0
# Keep box [6] only for class 1
# Keep box [2] for class 2
scores_data = tf.constant([[
[0.9, 0.1, 0.0],
[0.89, 0.1, 0.01],
[0.5, 0.01, 0.49],
[0.8, 0.1, 0.1],
[0.79, 0.11, 0.2],
[0.2, 0.8, 0.1],
[0.1, 0.9, 0.0],
]],
dtype=tf.float32)
with self.session() as sess:
outputs = utils_3d.BatchedOrientedNMSIndices(
bboxes_data,
scores_data,
nms_iou_threshold=0.1,
score_threshold=0.3,
max_boxes_per_class=5)
indices, scores, valid_mask = sess.run(outputs)
class_masks = [
valid_mask[0, cls_idx, :].astype(np.bool) for cls_idx in range(3)
]
# Check the correct number of valid results per class
self.assertEqual(class_masks[0].sum(), 2)
self.assertEqual(class_masks[1].sum(), 1)
self.assertEqual(class_masks[2].sum(), 1)
# Check the results for each class
self.assertAllEqual(indices[0, 0, class_masks[0]], [0, 3])
self.assertAllClose(scores[0, 0, class_masks[0]], [0.9, 0.8])
self.assertAllEqual(indices[0, 1, class_masks[1]], [6])
self.assertAllClose(scores[0, 1, class_masks[1]], [0.9])
self.assertAllEqual(indices[0, 2, class_masks[2]], [2])
self.assertAllClose(scores[0, 2, class_masks[2]], [0.49])
# Use a list of score thresholds instead
outputs = utils_3d.BatchedOrientedNMSIndices(
bboxes_data,
scores_data,
nms_iou_threshold=[0.1, 0.1, 0.1],
score_threshold=[0.899, 0.5, 0.3],
max_boxes_per_class=5)
indices, scores, valid_mask = sess.run(outputs)
class_masks = [
valid_mask[0, cls_idx, :].astype(np.bool) for cls_idx in range(3)
]
# Check the correct number of valid results per class
self.assertEqual(class_masks[0].sum(), 1)
self.assertEqual(class_masks[1].sum(), 1)
self.assertEqual(class_masks[2].sum(), 1)
# Check the results for each class
self.assertAllEqual(indices[0, 0, class_masks[0]], [0])
self.assertAllClose(scores[0, 0, class_masks[0]], [0.9])
self.assertAllEqual(indices[0, 1, class_masks[1]], [6])
self.assertAllClose(scores[0, 1, class_masks[1]], [0.9])
self.assertAllEqual(indices[0, 2, class_masks[2]], [2])
self.assertAllClose(scores[0, 2, class_masks[2]], [0.49])
def testRandomPadOrTrimToTrim(self):
points = tf.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.],
[10., 11., 12.]])
features = tf.constant([[100.], [200.], [300.], [400.]])
points, features = detection_3d_lib.RandomPadOrTrimTo([points, features],
2,
seed=123)[0]
with self.session() as sess:
points_np, features_np = sess.run([points, features])
# Slicing chooses 2 random points.
self.assertAllClose([[1., 2., 3.], [10., 11., 12.]], points_np)
self.assertAllClose([[100.], [400.]], features_np)
def testRandomPadOrTrimToPad(self):
points = tf.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.],
[10., 11., 12.]])
features = tf.constant([[100.], [200.], [300.], [400.]])
points, features = detection_3d_lib.RandomPadOrTrimTo([points, features],
10,
seed=123)[0]
with self.session() as sess:
points_np, features_np = sess.run([points, features])
# Padding repeats a random set of points.
self.assertAllClose([[1., 2., 3.], [1., 2., 3.], [10., 11., 12.],
[7., 8., 9.], [7., 8., 9.], [4., 5., 6.]],
points_np[4:])
self.assertAllClose([[100.], [100.], [400.], [300.], [300.], [200.]],
features_np[4:])
def testRandomPadOrTrimToEmpty(self):
points = tf.constant([[1., 2., 3.]])
features = tf.constant([[100.]])
points, features = detection_3d_lib.RandomPadOrTrimTo(
[points[0:0], features[0:0]], 10, seed=123)[0]
with self.session() as sess:
points_np, features_np = sess.run([points, features])
self.assertAllClose(points_np, np.zeros(shape=(10, 3)))
self.assertAllClose(features_np, np.zeros(shape=(10, 1)))
def testCornersToImagePlane(self):
utils_3d = detection_3d_lib.Utils3D()
batch = 4
num_boxes = 50
corners = tf.random.uniform([batch, num_boxes, 8, 3])
velo_to_image_plane = tf.random.uniform([batch, 3, 4])
corners_to_image_plane = utils_3d.CornersToImagePlane(
corners, velo_to_image_plane)
self.assertEqual([batch, num_boxes, 8, 2], corners_to_image_plane.shape)
if __name__ == '__main__':
tf.test.main()
|
py | b411a93faf98ee2936a0056412049c5422c11c05 | #!/usr/bin/env python3
# Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from xi_plugin import start_plugin, Plugin
try:
import enchant
except ImportError:
import sys
print("spellcheck plugin requires pyenchant: https://github.com/rfk/pyenchant",
file=sys.stderr, flush=True)
sys.exit(1)
class Spellcheck(Plugin):
"""Basic spellcheck using pyenchant."""
def __init__(self):
super(Spellcheck, self).__init__()
lang = os.environ.get("LC_CTYPE", "en_US.utf-8").split('.')[0]
self.dictionary = enchant.Dict(lang)
self.print_err("loaded dictionary for {}".format(lang))
self.in_word = False
self.has_sent_scopes = False
def update(self, view, author, rev, start, end,
new_len, edit_type, text=None):
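# Rough flow of the handler below: track whether the cursor is inside a word;
# when a boundary character arrives, look up the word that just ended and, if
# the dictionary rejects it, mark it with the 'invalid.illegal.spellcheck'
# scope via update_spans.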
if not self.has_sent_scopes:
view.add_scopes([['invalid.illegal.spellcheck']])
self.has_sent_scopes = True
if author == self.identifier:
pass
elif not self.in_word and text.isalpha():
self.in_word = True
# punctuation not exhaustive, this is a demo ;)
elif self.in_word and (text.isspace() or text in ["!", ",", ".", ":", ";", "?"]):
self.in_word = False
line, col = view.lines.linecol_for_offset(end)
prev_word = view.lines.previous_word(end)
# TODO: libs should provide some "Text" object, which represents some string,
# and provides convenience methods for getting relevant offsets, setting styles, etc
if prev_word and not self.dictionary.check(prev_word):
# we apply spans in groups; spans within a group may overlap.
# A span within a group is offset relative to group's start offset.
spans = [{'start': 0,
'end': len(prev_word),
'scope_id': 0}]
view.update_spans(end-len(prev_word), len(prev_word), spans, rev)
return 0
def main():
start_plugin(Spellcheck())
if __name__ == "__main__":
main()
|
py | b411a9a08b525552ec4aa60e203ca18ab5b6f8fb |
try:
    print('hello')
except Exception:
    print('Error!') |
py | b411aa96501b88d534b1e0825347dd4c11775303 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-20 09:16
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('contact_manager', '0007_auto_20161020_0758'),
('servicecatalog', '0047_auto_20161018_1437'),
]
operations = [
migrations.CreateModel(
name='ModuleContacts',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('contact', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='servicecatalog.Contact')),
('contact_role', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contact_manager.ContactRole')),
('module', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='servicecatalog.Module')),
],
),
migrations.AlterUniqueTogether(
name='modulecontacts',
unique_together=set([('module', 'contact', 'contact_role')]),
),
]
|
py | b411ab2dee201cb53e1d1b7d25513c10e7330837 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from collections import defaultdict
from django.utils.translation import ugettext_lazy as _
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from aldryn_people import models, forms, DEFAULT_APP_NAMESPACE
from .utils import get_valid_languages
NAMESPACE_ERROR = _(
"Seems that there is no valid application hook for aldryn-people."
"Links can't be rendered without an app hook."
)
class PeoplePlugin(CMSPluginBase):
TEMPLATE_NAME = 'aldryn_people/plugins/%s/people_list.html'
module = 'People'
render_template = TEMPLATE_NAME % models.PeoplePlugin.STYLE_CHOICES[0][0]
name = _('People list')
model = models.PeoplePlugin
fieldsets = (
(None, {
'fields': (
'style',
),
}),
(_('People'), {
'description': _('Select and arrange specific people, or leave '
'blank to use all.'),
'fields': (
'people',
)
}),
(_('Options'), {
'fields': (
('group_by_group', 'show_ungrouped', ),
'show_links',
'show_vcard',
)
})
)
def group_people(self, people):
groups = defaultdict(list)
for person in people:
for group in person.groups.all():
groups[group].append(person)
# Fixes a template resolution-related issue. See:
# http://stackoverflow.com/questions/4764110/django-template-cant-loop-defaultdict # noqa
groups.default_factory = None
return groups
def render(self, context, instance, placeholder):
people = instance.get_selected_people()
if not people:
people = models.Person.objects.published()
valid_languages = get_valid_languages(
DEFAULT_APP_NAMESPACE, instance.language, context['request'])
people = people.translated(*valid_languages)
if not valid_languages:
context['namespace_error'] = NAMESPACE_ERROR
self.render_template = self.TEMPLATE_NAME % instance.style
context['instance'] = instance
context['people'] = people
if instance.group_by_group:
context['people_groups'] = self.group_people(people)
if instance.show_ungrouped:
groupless = people.filter(groups__isnull=True)
else:
groupless = people.none()
context['groupless_people'] = groupless
else:
context['people_groups'] = []
context['groupless_people'] = people.none()
return context
plugin_pool.register_plugin(PeoplePlugin)
@plugin_pool.register_plugin
class RelatedPeoplePlugin(CMSPluginBase):
TEMPLATE_NAME = 'aldryn_people/plugins/related_people__%s.html'
module = 'People'
render_template = TEMPLATE_NAME % forms.LAYOUT_CHOICES[0][0]
name = _('Related People')
model = models.RelatedPeoplePlugin
form = forms.RelatedPeoplePluginForm
def render(self, context, instance, placeholder):
request = context.get('request')
context['instance'] = instance
context['title'] = instance.title
context['icon'] = instance.icon
context['image'] = instance.image
qs = instance.related_people.published()
related_groups = instance.related_groups.all()
related_locations = instance.related_locations.all()
related_categories = instance.related_categories.all()
related_services = instance.related_services.all()
if not qs.exists():
qs = models.Person.objects.published().distinct()
if related_groups.exists():
qs = qs.filter(groups__in=related_groups)
if related_locations.exists():
qs = qs.filter(location__in=related_locations)
if related_categories.exists():
qs = qs.filter(categories__in=related_categories)
if related_services.exists():
qs = qs.filter(services__in=related_services)
context['related_people'] = qs[:int(instance.number_of_people)]
return context
def get_render_template(self, context, instance, placeholder):
return self.TEMPLATE_NAME % instance.layout
|
py | b411abec77c02ac664d630ab8dacd023032e16fd | """
Surface Boxplot
====================
Shows the use of the surface boxplot, which is a generalization of the
functional boxplot for FDataGrid whose domain dimension is 2.
"""
# Author: Amanda Hernando Bernabé
# License: MIT
# sphinx_gallery_thumbnail_number = 3
from skfda import FDataGrid
from skfda.datasets import make_gaussian_process
from skfda.exploratory.visualization import SurfaceBoxplot, Boxplot
import matplotlib.pyplot as plt
import numpy as np
##############################################################################
# In order to instantiate a
# :class:`~skfda.exploratory.visualization.SurfaceBoxplot`, a functional data
# object with bidimensional domain must be generated. In this example, a
# FDataGrid representing a function
# :math:`f : \mathbb{R}^2\longmapsto\mathbb{R}` is constructed,
# using as an example a Brownian process extruded into another dimension.
#
# The values of the Brownian process are generated using
# :func:`~skfda.datasets.make_gaussian_process`.
# This function returns an FDataGrid object whose ``data_matrix``
# stores the values needed.
n_samples = 10
n_features = 10
fd = make_gaussian_process(n_samples=n_samples, n_features=n_features,
random_state=1)
fd.dataset_name = "Brownian process"
##############################################################################
# Afterwards, the values generated for one dimension of the domain are extruded
# along another dimension, obtaining a three-dimensional matrix or cube
# (two-dimensional domain and one-dimensional image).
cube = np.repeat(fd.data_matrix, n_features).reshape(
(n_samples, n_features, n_features))
##############################################################################
# We can plot now the extruded trajectories.
fd_2 = FDataGrid(data_matrix=cube,
grid_points=np.tile(fd.grid_points, (2, 1)),
dataset_name="Extruded Brownian process")
fd_2.plot()
##############################################################################
# Since matplotlib was initially designed with only two-dimensional plotting
# in mind, the three-dimensional plotting utilities were built on top of
# matplotlib's two-dimensional display, and the result is a convenient (if
# somewhat limited) set of tools for three-dimensional data visualization as
# we can observe.
#
# For this reason, the profiles of the surfaces, which are contained in the
# first two generated functional data objects, are plotted below, to help to
# visualize the data.
fd.plot()
##############################################################################
# To terminate the example, the instantiation of the
# :class:`~skfda.exploratory.visualization.SurfaceBoxplot` object is
# made, showing the surface boxplot which corresponds to our FDataGrid
surfaceBoxplot = SurfaceBoxplot(fd_2)
surfaceBoxplot.plot()
##############################################################################
# The surface boxplot contains the median, the central envelope and the
# outlying envelope plotted from darker to lighter colors, although they can
# be customized.
#
# Analogously to the earlier procedure of plotting the three-dimensional
# data and their corresponding profiles, we can also obtain the functional
# boxplot for one-dimensional data with the
# :class:`~skfda.exploratory.visualization.Boxplot`, passing as argument the
# first FDataGrid object. The profile of the surface boxplot is obtained.
boxplot1 = Boxplot(fd)
boxplot1.plot()
|
py | b411ad7d4af745d86352f443cb36b164772a07e6 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
from typing import Optional, Set
import logging
from overrides import overrides
from google.cloud.pubsublite.internal.wait_ignore_cancelled import wait_ignore_errors
from google.cloud.pubsublite.internal.wire.assigner import Assigner
from google.cloud.pubsublite.internal.wire.retrying_connection import (
RetryingConnection,
ConnectionFactory,
)
from google.api_core.exceptions import FailedPrecondition, GoogleAPICallError
from google.cloud.pubsublite.internal.wire.connection_reinitializer import (
ConnectionReinitializer,
)
from google.cloud.pubsublite.internal.wire.connection import Connection
from google.cloud.pubsublite.types.partition import Partition
from google.cloud.pubsublite_v1.types import (
PartitionAssignmentRequest,
PartitionAssignment,
InitialPartitionAssignmentRequest,
PartitionAssignmentAck,
)
_LOGGER = logging.getLogger(__name__)
# Maximum bytes per batch at 3.5 MiB to avoid GRPC limit of 4 MiB
_MAX_BYTES = int(3.5 * 1024 * 1024)
# Maximum messages per batch at 1000
_MAX_MESSAGES = 1000
class AssignerImpl(
Assigner, ConnectionReinitializer[PartitionAssignmentRequest, PartitionAssignment]
):
_initial: InitialPartitionAssignmentRequest
_connection: RetryingConnection[PartitionAssignmentRequest, PartitionAssignment]
_outstanding_assignment: bool
_receiver: Optional[asyncio.Future]
# A queue that may only hold one element with the next assignment.
_new_assignment: "asyncio.Queue[Set[Partition]]"
def __init__(
self,
initial: InitialPartitionAssignmentRequest,
factory: ConnectionFactory[PartitionAssignmentRequest, PartitionAssignment],
):
self._initial = initial
self._connection = RetryingConnection(factory, self)
self._outstanding_assignment = False
self._receiver = None
self._new_assignment = asyncio.Queue(maxsize=1)
async def __aenter__(self):
await self._connection.__aenter__()
return self
def _start_receiver(self):
assert self._receiver is None
self._receiver = asyncio.ensure_future(self._receive_loop())
async def _stop_receiver(self):
if self._receiver:
self._receiver.cancel()
await wait_ignore_errors(self._receiver)
self._receiver = None
async def _receive_loop(self):
while True:
response = await self._connection.read()
if self._outstanding_assignment or not self._new_assignment.empty():
self._connection.fail(
FailedPrecondition(
"Received a duplicate assignment on the stream while one was outstanding."
)
)
return
self._outstanding_assignment = True
partitions = set()
for partition in response.partitions:
partitions.add(Partition(partition))
self._new_assignment.put_nowait(partitions)
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self._stop_receiver()
await self._connection.__aexit__(exc_type, exc_val, exc_tb)
@overrides
async def stop_processing(self, error: GoogleAPICallError):
await self._stop_receiver()
self._outstanding_assignment = False
while not self._new_assignment.empty():
self._new_assignment.get_nowait()
@overrides
async def reinitialize(
self, connection: Connection[PartitionAssignmentRequest, PartitionAssignment],
):
await connection.write(PartitionAssignmentRequest(initial=self._initial))
self._start_receiver()
async def get_assignment(self) -> Set[Partition]:
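# Acks the previously delivered assignment (if any), then blocks until
# _receive_loop pushes the next assignment onto the queue.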
if self._outstanding_assignment:
try:
await self._connection.write(
PartitionAssignmentRequest(ack=PartitionAssignmentAck())
)
self._outstanding_assignment = False
except GoogleAPICallError as e:
# If there is a failure to ack, keep going. The stream likely restarted.
_LOGGER.debug(
f"Assignment ack attempt failed due to stream failure: {e}"
)
return await self._connection.await_unless_failed(self._new_assignment.get())
|
py | b411ae0eedbdc8fbc56784bf4bbcc4b2c432aee7 | import os
import subprocess
from typing import Any
from typing import cast
from typing import Generator
from typing import Iterable
from typing import List
from typing import Set
from typing import Tuple
from typing import Union
from ..filters.allowlist import is_line_allowlisted
from ..settings import get_filters
from ..settings import get_plugins
from ..settings import get_settings
from ..transformers import get_transformed_file
from ..types import NamedIO
from ..types import SelfAwareCallable
from ..util import git
from ..util.code_snippet import get_code_snippet
from ..util.inject import call_function_with_arguments
from ..util.path import get_relative_path_if_in_cwd
from .log import log
from .plugins import Plugin
from .potential_secret import PotentialSecret
def get_files_to_scan(
*paths: str,
should_scan_all_files: bool = False
) -> Generator[str, None, None]:
"""
If we specify specific files, we should be able to scan them. This abides by the
Principle of Least Surprise -- so users don't have to do:
$ detect-secrets scan test_data/config.env --all-files
to scan the specific file.
>>> list(get_files_to_scan('test_data/config.env')) == ['test_data/config.env']
In a similar way,
>>> list(get_files_to_scan('test_data/config.env', '.secrets.baseline')) == \
... ['test_data/config.env', '.secrets.baseline']
If we specify directories, then we should use git tracked files when possible. To
override this behavior, we can specify `should_scan_all_files=True`, which will force
the scan for all files.
See test cases for more details.
"""
# First, we determine the appropriate filtering mode to be used.
# If this is True, then it will consider everything to be valid.
# Otherwise, it will only list the files that are valid.
valid_paths: Union[bool, Set[str]] = True
for path in paths:
# Since this is not a directory, we assume that it is a file proper, and automatically
# consider it valid.
if not os.path.isdir(path):
continue
if not should_scan_all_files:
try:
valid_paths = git.get_tracked_files(git.get_root_directory())
except subprocess.CalledProcessError:
log.warning('Did not detect git repository. Try scanning all files instead.')
valid_paths = False
# Since valid_paths attempts to get *all* tracked files in the repository, we just need
# to initialize it once.
break
if not valid_paths:
yield from []
return
for path in paths:
iterator = (
cast(List[Tuple], [(os.getcwd(), None, [path])])
if os.path.isfile(path)
else os.walk(path)
)
for path_root, _, filenames in iterator:
for filename in filenames:
relative_path = get_relative_path_if_in_cwd(os.path.join(path_root, filename))
if not relative_path:
# e.g. symbolic links may be pointing outside the root directory
continue
if (
valid_paths is True
or relative_path in cast(Set[str], valid_paths)
):
yield relative_path
def scan_line(line: str) -> Generator[PotentialSecret, None, None]:
"""Used for adhoc string scanning."""
# Disable this, since it doesn't make sense to run this for adhoc usage.
get_settings().disable_filters(
'detect_secrets.filters.common.is_invalid_file',
)
get_filters.cache_clear()
yield from (
secret
for plugin in get_plugins()
for secret in _scan_line(
plugin=plugin,
filename='adhoc-string-scan',
line=line,
line_number=0,
enable_eager_search=True,
)
if not _is_filtered_out(
required_filter_parameters=['context'],
filename=secret.filename,
secret=secret.secret_value,
plugin=plugin,
line=line,
context=get_code_snippet(
lines=[line],
line_number=1,
),
)
)
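# A minimal usage sketch (hypothetical secret value; plugins and filters are
# assumed to be configured via get_settings() beforehand):
#
#   for secret in scan_line('password = "hunter2"'):
#       print(secret.filename, secret.secret_value)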
def scan_file(filename: str) -> Generator[PotentialSecret, None, None]:
if not get_plugins(): # pragma: no cover
log.error('No plugins to scan with!')
return
if _is_filtered_out(required_filter_parameters=['filename'], filename=filename):
return
try:
has_secret = False
for lines in _get_lines_from_file(filename):
for secret in _process_line_based_plugins(
lines=list(enumerate(lines, 1)),
filename=filename,
):
has_secret = True
yield secret
if has_secret:
break
except IOError:
log.warning(f'Unable to open file: {filename}')
return
def scan_diff(diff: str) -> Generator[PotentialSecret, None, None]:
"""
:raises: ImportError
"""
if not get_plugins(): # pragma: no cover
log.error('No plugins to scan with!')
return
for filename, lines in _get_lines_from_diff(diff):
yield from _process_line_based_plugins(lines, filename=filename)
def scan_for_allowlisted_secrets_in_file(filename: str) -> Generator[PotentialSecret, None, None]:
"""
Developers are able to add individual lines to the allowlist using
`detect_secrets.filters.allowlist.is_line_allowlisted`. However, there are
times when we want to verify that no *actual* secrets are added to the codebase
via this feature.
This scans specifically for these lines, and ignores everything else.
"""
if not get_plugins(): # pragma: no cover
log.error('No plugins to scan with!')
return
if _is_filtered_out(
required_filter_parameters=['filename'],
filename=filename,
):
return
# NOTE: Unlike `scan_file`, we don't ever have to use eager file transformers, since we already
# know which lines we want to scan.
try:
for lines in _get_lines_from_file(filename):
yield from _scan_for_allowlisted_secrets_in_lines(enumerate(lines, 1), filename)
break
except IOError:
log.warning(f'Unable to open file: {filename}')
return
def scan_for_allowlisted_secrets_in_diff(diff: str) -> Generator[PotentialSecret, None, None]:
if not get_plugins(): # pragma: no cover
log.error('No plugins to scan with!')
return
for filename, lines in _get_lines_from_diff(diff):
yield from _scan_for_allowlisted_secrets_in_lines(lines, filename)
def _scan_for_allowlisted_secrets_in_lines(
lines: Iterable[Tuple[int, str]],
filename: str,
) -> Generator[PotentialSecret, None, None]:
# We control the setting here because it makes more sense than requiring the caller
# to set this setting before calling this function.
get_settings().disable_filters('detect_secrets.filters.allowlist.is_line_allowlisted')
get_filters.cache_clear()
line_numbers, lines = zip(*lines)
line_content = [line.rstrip() for line in lines]
for line_number, line in zip(line_numbers, line_content):
if not is_line_allowlisted(
filename,
line,
context=get_code_snippet(line_content, line_number),
):
continue
if _is_filtered_out(required_filter_parameters=['line'], filename=filename, line=line):
continue
for plugin in get_plugins():
yield from _scan_line(plugin, filename, line, line_number)
def _get_lines_from_file(filename: str) -> Generator[List[str], None, None]:
"""
This attempts to get lines in a given file. If no more lines are needed, the caller
is responsible for breaking out of this loop.
:raises: IOError
:raises: FileNotFoundError
"""
with open(filename) as f:
log.info(f'Checking file: {filename}')
try:
lines = get_transformed_file(cast(NamedIO, f))
if not lines:
lines = f.readlines()
except UnicodeDecodeError:
# We flat out ignore binary files
return
yield lines
# If the above lines don't prove to be useful to the caller, try using eager transformers.
f.seek(0)
lines = get_transformed_file(cast(NamedIO, f), use_eager_transformers=True)
if not lines:
return
yield lines
def _get_lines_from_diff(diff: str) -> Generator[Tuple[str, List[Tuple[int, str]]], None, None]:
"""
:raises: ImportError
"""
# Local imports, so that we don't need to require unidiff for versions of
# detect-secrets that don't use it.
from unidiff import PatchSet
patch_set = PatchSet.from_string(diff)
for patch_file in patch_set:
filename = patch_file.path
if _is_filtered_out(required_filter_parameters=['filename'], filename=filename):
continue
yield (
filename,
[
(line.target_line_no, line.value)
for chunk in patch_file
# target_lines refers to incoming (new) changes
for line in chunk.target_lines()
if line.is_added
],
)
def _process_line_based_plugins(
lines: List[Tuple[int, str]],
filename: str,
) -> Generator[PotentialSecret, None, None]:
line_content = [line[1] for line in lines]
# NOTE: We iterate through lines *then* plugins, because we want to quit early if any of the
# filters return True.
for line_number, line in lines:
line = line.rstrip()
code_snippet = get_code_snippet(
lines=line_content,
line_number=line_number,
)
# We apply line-specific filters, and see whether that allows us to quit early.
if _is_filtered_out(
required_filter_parameters=['line'],
filename=filename,
line=line,
context=code_snippet,
):
continue
yield from (
secret
for plugin in get_plugins()
for secret in _scan_line(plugin, filename, line, line_number)
if not _is_filtered_out(
required_filter_parameters=['context'],
filename=secret.filename,
secret=secret.secret_value,
plugin=plugin,
line=line,
context=code_snippet,
)
)
def _scan_line(
plugin: Plugin,
filename: str,
line: str,
line_number: int,
**kwargs: Any,
) -> Generator[PotentialSecret, None, None]:
# NOTE: We don't apply filter functions here yet, because we don't have any filters
# that operate on (filename, line, plugin) without `secret`
secrets = call_function_with_arguments(
plugin.analyze_line,
filename=filename,
line=line,
line_number=line_number,
**kwargs,
)
if not secrets:
return
yield from (
secret
for secret in secrets
if not _is_filtered_out(
required_filter_parameters=['secret'],
filename=secret.filename,
secret=secret.secret_value,
plugin=plugin,
line=line,
)
)
def _is_filtered_out(required_filter_parameters: Iterable[str], **kwargs: Any) -> bool:
for filter_fn in get_filters_with_parameter(*required_filter_parameters):
try:
if call_function_with_arguments(filter_fn, **kwargs):
if 'secret' in kwargs:
debug_msg = f'Skipping "{kwargs["secret"]}" due to `{filter_fn.path}`.'
elif list(kwargs.keys()) == ['filename']:
# We want to make sure this is only run if we're skipping files (as compared
# to other filters that may include `filename` as a parameter).
debug_msg = f'Skipping "{kwargs["filename"]}" due to `{filter_fn.path}`'
else:
debug_msg = f'Skipping secret due to `{filter_fn.path}`.'
log.debug(debug_msg)
return True
except TypeError:
# Skipping non-compatible filters
pass
return False
def get_filters_with_parameter(*parameters: str) -> List[SelfAwareCallable]:
"""
The issue with our method of dependency injection is that functions will be called multiple
times. For example, if we have two functions:
>>> def foo(filename: str): ...
>>> def bar(filename: str, secret: str): ...
our invocation of `call_function_with_arguments(filename=filename, secret=secret)`
will run both of these functions. While expected, this results in multiple invocations of
the same function, which can be less than ideal (especially if we have a heavy duty filter).
To address this, we filter our filters with this function. It will return the functions
that accept a minimum set of parameters, to avoid duplicative work. For instance,
>>> get_filters_with_parameter('secret')
[bar]
"""
minimum_parameters = set(parameters)
return [
filter
for filter in get_filters()
if minimum_parameters <= filter.injectable_variables
]
|
py | b411ae74d278f3a507cf178760c03afc0697b526 | #
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import json
import os
import pathlib
import re
import shutil
import tempfile
from distutils.dir_util import copy_tree
from typing import Any, Dict
import pytest
from integration_tests.dbt_integration_test import DbtIntegrationTest
from normalization.destination_type import DestinationType
from normalization.transform_catalog.catalog_processor import CatalogProcessor
temporary_folders = set()
# dbt models and final sql outputs from the following git versioned tests will be written in a folder included in
# airbyte git repository.
git_versioned_tests = ["test_simple_streams", "test_nested_streams"]
dbt_test_utils = DbtIntegrationTest()
@pytest.fixture(scope="module", autouse=True)
def before_all_tests(request):
destinations_to_test = dbt_test_utils.get_test_targets()
for integration_type in [d.value for d in DestinationType]:
if integration_type in destinations_to_test:
test_root_dir = f"{pathlib.Path().absolute()}/normalization_test_output/{integration_type.lower()}"
shutil.rmtree(test_root_dir, ignore_errors=True)
if os.getenv("RANDOM_TEST_SCHEMA"):
target_schema = dbt_test_utils.generate_random_string("test_normalization_ci_")
dbt_test_utils.set_target_schema(target_schema)
dbt_test_utils.change_current_test_dir(request)
dbt_test_utils.setup_db(destinations_to_test)
os.environ["PATH"] = os.path.abspath("../.venv/bin/") + ":" + os.environ["PATH"]
yield
dbt_test_utils.tear_down_db()
for folder in temporary_folders:
print(f"Deleting temporary test folder {folder}")
shutil.rmtree(folder, ignore_errors=True)
# TODO delete target_schema in destination by copying dbt_project.yml and injecting an on-run-end hook to clean up
@pytest.fixture
def setup_test_path(request):
dbt_test_utils.change_current_test_dir(request)
print(f"Running from: {pathlib.Path().absolute()}")
print(f"Current PATH is: {os.environ['PATH']}")
yield
os.chdir(request.config.invocation_dir)
@pytest.mark.parametrize(
"test_resource_name",
set(
git_versioned_tests
+ [
# Non-versioned tests outputs below will be written to /tmp folders instead
]
),
)
@pytest.mark.parametrize("destination_type", list(DestinationType))
def test_normalization(destination_type: DestinationType, test_resource_name: str, setup_test_path):
if destination_type.value not in dbt_test_utils.get_test_targets():
pytest.skip(f"Destinations {destination_type} is not in NORMALIZATION_TEST_TARGET env variable")
if (
destination_type.value in (DestinationType.ORACLE.value, DestinationType.CLICKHOUSE.value)
and test_resource_name == "test_nested_streams"
):
pytest.skip(f"Destinations {destination_type} does not support nested streams")
target_schema = dbt_test_utils.target_schema
if destination_type.value == DestinationType.ORACLE.value:
# Oracle does not allow changing to random schema
dbt_test_utils.set_target_schema("test_normalization")
try:
run_test_normalization(destination_type, test_resource_name)
finally:
dbt_test_utils.set_target_schema(target_schema)
def run_test_normalization(destination_type: DestinationType, test_resource_name: str):
print(f"Testing normalization {destination_type} for {test_resource_name} in ", dbt_test_utils.target_schema)
# Create the test folder with dbt project and appropriate destination settings to run integration tests from
test_root_dir = setup_test_dir(destination_type, test_resource_name)
run_first_normalization(destination_type, test_resource_name, test_root_dir)
if os.path.exists(os.path.join("resources", test_resource_name, "data_input", "messages_incremental.txt")):
run_incremental_normalization(destination_type, test_resource_name, test_root_dir)
if os.path.exists(os.path.join("resources", test_resource_name, "data_input", "messages_schema_change.txt")):
run_schema_change_normalization(destination_type, test_resource_name, test_root_dir)
def run_first_normalization(destination_type: DestinationType, test_resource_name: str, test_root_dir: str):
destination_config = dbt_test_utils.generate_profile_yaml_file(destination_type, test_root_dir)
# Use destination connector to create _airbyte_raw_* tables to use as input for the test
assert setup_input_raw_data(destination_type, test_resource_name, test_root_dir, destination_config)
# generate models from catalog
generate_dbt_models(destination_type, test_resource_name, test_root_dir, "models", "catalog.json")
# Setup test resources and models
setup_dbt_test(destination_type, test_resource_name, test_root_dir)
dbt_test_utils.dbt_check(destination_type, test_root_dir)
# Run dbt process
dbt_test_utils.dbt_run(destination_type, test_root_dir, force_full_refresh=True)
copy_tree(os.path.join(test_root_dir, "build/run/airbyte_utils/models/generated/"), os.path.join(test_root_dir, "first_output"))
shutil.rmtree(os.path.join(test_root_dir, "build/run/airbyte_utils/models/generated/"), ignore_errors=True)
# Verify dbt process
dbt_test(destination_type, test_root_dir)
def run_incremental_normalization(destination_type: DestinationType, test_resource_name: str, test_root_dir: str):
# Use destination connector to reset _airbyte_raw_* tables with new incremental data
setup_incremental_data(destination_type, test_resource_name, test_root_dir)
# setup new test files
setup_dbt_incremental_test(destination_type, test_resource_name, test_root_dir)
# Run dbt process
dbt_test_utils.dbt_run(destination_type, test_root_dir)
normalize_dbt_output(test_root_dir, "build/run/airbyte_utils/models/generated/", "second_output")
if destination_type.value in [DestinationType.MYSQL.value, DestinationType.ORACLE.value]:
pytest.skip(f"{destination_type} does not support incremental yet")
dbt_test(destination_type, test_root_dir)
def run_schema_change_normalization(destination_type: DestinationType, test_resource_name: str, test_root_dir: str):
if destination_type.value in [DestinationType.MYSQL.value, DestinationType.ORACLE.value]:
# TODO: upgrade dbt-adapter repositories to work with dbt 0.21.0+ (outside airbyte's control)
pytest.skip(f"{destination_type} does not support schema change in incremental yet (requires dbt 0.21.0+)")
if destination_type.value in [DestinationType.SNOWFLAKE.value, DestinationType.CLICKHOUSE.value]:
pytest.skip(f"{destination_type} is disabled as it doesnt support schema change in incremental yet (column type changes)")
if destination_type.value in [DestinationType.MSSQL.value, DestinationType.SNOWFLAKE.value]:
# TODO: create/fix github issue in corresponding dbt-adapter repository to handle schema changes (outside airbyte's control)
pytest.skip(f"{destination_type} is disabled as it doesnt fully support schema change in incremental yet")
setup_schema_change_data(destination_type, test_resource_name, test_root_dir)
generate_dbt_models(destination_type, test_resource_name, test_root_dir, "modified_models", "catalog_schema_change.json")
setup_dbt_schema_change_test(destination_type, test_resource_name, test_root_dir)
dbt_test_utils.dbt_run(destination_type, test_root_dir)
normalize_dbt_output(test_root_dir, "build/run/airbyte_utils/modified_models/generated/", "third_output")
dbt_test(destination_type, test_root_dir)
def normalize_dbt_output(test_root_dir: str, input_dir: str, output_dir: str):
tmp_dir = os.path.join(test_root_dir, input_dir)
output_dir = os.path.join(test_root_dir, output_dir)
shutil.rmtree(output_dir, ignore_errors=True)
def copy_replace_dbt_tmp(src, dst):
dbt_test_utils.copy_replace(src, dst, "__dbt_tmp[0-9]+", "__dbt_tmp")
shutil.copytree(tmp_dir, output_dir, copy_function=copy_replace_dbt_tmp)
shutil.rmtree(tmp_dir, ignore_errors=True)
def setup_test_dir(destination_type: DestinationType, test_resource_name: str) -> str:
"""
We prepare a clean folder to run the tests from.
if the test_resource_name is part of git_versioned_tests, then dbt models and final sql outputs
will be written to a folder included in airbyte git repository.
Non-versioned tests will be written in /tmp folders instead.
The purpose is to keep track of a small set of downstream changes on selected integration test cases.
- generated dbt models created by normalization script from an input destination_catalog.json
- final output sql files created by dbt CLI from the generated dbt models (dbt models are sql files with jinja templating,
these are interpreted and compiled into the native SQL dialect of the final destination engine)
"""
if test_resource_name in git_versioned_tests:
test_root_dir = f"{pathlib.Path().absolute()}/normalization_test_output/{destination_type.value.lower()}"
else:
test_root_dir = f"{pathlib.Path().joinpath('..', 'build', 'normalization_test_output', destination_type.value.lower()).resolve()}"
os.makedirs(test_root_dir, exist_ok=True)
test_root_dir = f"{test_root_dir}/{test_resource_name}"
shutil.rmtree(test_root_dir, ignore_errors=True)
print(f"Setting up test folder {test_root_dir}")
dbt_project_yaml = "../dbt-project-template/dbt_project.yml"
copy_tree("../dbt-project-template", test_root_dir)
if destination_type.value == DestinationType.MSSQL.value:
copy_tree("../dbt-project-template-mssql", test_root_dir)
dbt_project_yaml = "../dbt-project-template-mssql/dbt_project.yml"
elif destination_type.value == DestinationType.MYSQL.value:
copy_tree("../dbt-project-template-mysql", test_root_dir)
dbt_project_yaml = "../dbt-project-template-mysql/dbt_project.yml"
elif destination_type.value == DestinationType.ORACLE.value:
copy_tree("../dbt-project-template-oracle", test_root_dir)
dbt_project_yaml = "../dbt-project-template-oracle/dbt_project.yml"
elif destination_type.value == DestinationType.CLICKHOUSE.value:
copy_tree("../dbt-project-template-clickhouse", test_root_dir)
dbt_project_yaml = "../dbt-project-template-clickhouse/dbt_project.yml"
dbt_test_utils.copy_replace(dbt_project_yaml, os.path.join(test_root_dir, "dbt_project.yml"))
return test_root_dir
def setup_input_raw_data(
destination_type: DestinationType, test_resource_name: str, test_root_dir: str, destination_config: Dict[str, Any]
) -> bool:
"""
We run docker images of destinations to upload test data stored in the messages.txt file for each test case.
This should populate the associated "raw" tables from which normalization reads when running the dbt CLI.
"""
catalog_file = os.path.join("resources", test_resource_name, "data_input", "catalog.json")
message_file = os.path.join("resources", test_resource_name, "data_input", "messages.txt")
dbt_test_utils.copy_replace(
catalog_file,
os.path.join(test_root_dir, "reset_catalog.json"),
pattern='"destination_sync_mode": ".*"',
replace_value='"destination_sync_mode": "overwrite"',
)
dbt_test_utils.copy_replace(catalog_file, os.path.join(test_root_dir, "destination_catalog.json"))
config_file = os.path.join(test_root_dir, "destination_config.json")
with open(config_file, "w") as f:
f.write(json.dumps(destination_config))
# Force a reset in destination raw tables
assert run_destination_process(destination_type, test_root_dir, "", "reset_catalog.json")
# Run a sync to create raw tables in destinations
return run_destination_process(destination_type, test_root_dir, message_file, "destination_catalog.json")
def setup_incremental_data(destination_type: DestinationType, test_resource_name: str, test_root_dir: str) -> bool:
message_file = os.path.join("resources", test_resource_name, "data_input", "messages_incremental.txt")
# Force a reset in destination raw tables
assert run_destination_process(destination_type, test_root_dir, "", "reset_catalog.json")
# Run a sync to create raw tables in destinations
return run_destination_process(destination_type, test_root_dir, message_file, "destination_catalog.json")
def setup_schema_change_data(destination_type: DestinationType, test_resource_name: str, test_root_dir: str) -> bool:
catalog_file = os.path.join("resources", test_resource_name, "data_input", "catalog_schema_change.json")
message_file = os.path.join("resources", test_resource_name, "data_input", "messages_schema_change.txt")
dbt_test_utils.copy_replace(
catalog_file,
os.path.join(test_root_dir, "reset_catalog.json"),
pattern='"destination_sync_mode": ".*"',
replace_value='"destination_sync_mode": "overwrite"',
)
dbt_test_utils.copy_replace(catalog_file, os.path.join(test_root_dir, "destination_catalog.json"))
dbt_test_utils.copy_replace(
os.path.join(test_root_dir, "dbt_project.yml"),
os.path.join(test_root_dir, "first_dbt_project.yml"),
)
dbt_test_utils.copy_replace(
os.path.join(test_root_dir, "first_dbt_project.yml"),
os.path.join(test_root_dir, "dbt_project.yml"),
pattern=r'source-paths: \["models"\]',
replace_value='source-paths: ["modified_models"]',
)
# Run a sync to update raw tables in destinations
return run_destination_process(destination_type, test_root_dir, message_file, "destination_catalog.json")
def run_destination_process(destination_type: DestinationType, test_root_dir: str, message_file: str, catalog_file: str):
commands = [
"docker",
"run",
"--rm",
"--init",
"-v",
f"{test_root_dir}:/data",
"--network",
"host",
"-i",
f"airbyte/destination-{destination_type.value.lower()}:dev",
"write",
"--config",
"/data/destination_config.json",
"--catalog",
]
return dbt_test_utils.run_destination_process(message_file, test_root_dir, commands + [f"/data/{catalog_file}"])
def generate_dbt_models(destination_type: DestinationType, test_resource_name: str, test_root_dir: str, output_dir: str, catalog_file: str):
"""
This is the normalization step generating dbt models files from the destination_catalog.json taken as input.
"""
catalog_processor = CatalogProcessor(os.path.join(test_root_dir, output_dir, "generated"), destination_type)
catalog_processor.process(
os.path.join("resources", test_resource_name, "data_input", catalog_file), "_airbyte_data", dbt_test_utils.target_schema
)
def setup_dbt_test(destination_type: DestinationType, test_resource_name: str, test_root_dir: str):
"""
Prepare the data (copy) for the models for dbt test.
"""
replace_identifiers = os.path.join("resources", test_resource_name, "data_input", "replace_identifiers.json")
copy_test_files(
os.path.join("resources", test_resource_name, "dbt_test_config", "dbt_schema_tests"),
os.path.join(test_root_dir, "models/dbt_schema_tests"),
destination_type,
replace_identifiers,
)
copy_test_files(
os.path.join("resources", test_resource_name, "dbt_test_config", "dbt_data_tests_tmp"),
os.path.join(test_root_dir, "models/dbt_data_tests"),
destination_type,
replace_identifiers,
)
copy_test_files(
os.path.join("resources", test_resource_name, "dbt_test_config", "dbt_data_tests"),
os.path.join(test_root_dir, "tests"),
destination_type,
replace_identifiers,
)
def setup_dbt_incremental_test(destination_type: DestinationType, test_resource_name: str, test_root_dir: str):
"""
Prepare the data (copy) for the models for dbt test.
"""
replace_identifiers = os.path.join("resources", test_resource_name, "data_input", "replace_identifiers.json")
copy_test_files(
os.path.join("resources", test_resource_name, "dbt_test_config", "dbt_schema_tests_incremental"),
os.path.join(test_root_dir, "models/dbt_schema_tests"),
destination_type,
replace_identifiers,
)
test_directory = os.path.join(test_root_dir, "models/dbt_data_tests")
shutil.rmtree(test_directory, ignore_errors=True)
os.makedirs(test_directory, exist_ok=True)
copy_test_files(
os.path.join("resources", test_resource_name, "dbt_test_config", "dbt_data_tests_tmp_incremental"),
test_directory,
destination_type,
replace_identifiers,
)
test_directory = os.path.join(test_root_dir, "tests")
shutil.rmtree(test_directory, ignore_errors=True)
os.makedirs(test_directory, exist_ok=True)
copy_test_files(
os.path.join("resources", test_resource_name, "dbt_test_config", "dbt_data_tests_incremental"),
test_directory,
destination_type,
replace_identifiers,
)
def setup_dbt_schema_change_test(destination_type: DestinationType, test_resource_name: str, test_root_dir: str):
"""
Prepare (copy) the data and models for the dbt tests.
"""
replace_identifiers = os.path.join("resources", test_resource_name, "data_input", "replace_identifiers.json")
copy_test_files(
os.path.join("resources", test_resource_name, "dbt_test_config", "dbt_schema_tests_schema_change"),
os.path.join(test_root_dir, "modified_models/dbt_schema_tests"),
destination_type,
replace_identifiers,
)
test_directory = os.path.join(test_root_dir, "modified_models/dbt_data_tests")
shutil.rmtree(test_directory, ignore_errors=True)
os.makedirs(test_directory, exist_ok=True)
copy_test_files(
os.path.join("resources", test_resource_name, "dbt_test_config", "dbt_data_tests_tmp_schema_change"),
test_directory,
destination_type,
replace_identifiers,
)
test_directory = os.path.join(test_root_dir, "tests")
shutil.rmtree(test_directory, ignore_errors=True)
os.makedirs(test_directory, exist_ok=True)
copy_test_files(
os.path.join("resources", test_resource_name, "dbt_test_config", "dbt_data_tests_schema_change"),
test_directory,
destination_type,
replace_identifiers,
)
def dbt_test(destination_type: DestinationType, test_root_dir: str):
"""
dbt provides a way to run dbt tests as described here: https://docs.getdbt.com/docs/building-a-dbt-project/tests
- Schema tests are added in .yml files from the schema_tests directory
- see additional macros for testing here: https://github.com/fishtown-analytics/dbt-utils#schema-tests
- Data tests are added in .sql files from the data_tests directory and should return 0 records to be successful
We use this mechanism to verify the output of our integration tests.
"""
normalization_image: str = dbt_test_utils.get_normalization_image(destination_type)
assert dbt_test_utils.run_check_dbt_command(normalization_image, "test", test_root_dir)
def copy_test_files(src: str, dst: str, destination_type: DestinationType, replace_identifiers: str):
"""
Copy files while hacking Snowflake identifiers that need to be uppercased
(so we can share these dbt test files across destinations)
"""
if os.path.exists(src):
temp_dir = tempfile.mkdtemp(dir="/tmp/", prefix="normalization_test_")
temporary_folders.add(temp_dir)
# Copy and adapt capitalization
if destination_type.value == DestinationType.SNOWFLAKE.value:
shutil.copytree(src, temp_dir + "/upper", copy_function=copy_upper)
src = temp_dir + "/upper"
elif destination_type.value == DestinationType.REDSHIFT.value:
shutil.copytree(src, temp_dir + "/lower", copy_function=copy_lower)
src = temp_dir + "/lower"
if os.path.exists(replace_identifiers):
with open(replace_identifiers, "r") as file:
contents = file.read()
identifiers_map = json.loads(contents)
pattern = []
replace_value = []
if dbt_test_utils.target_schema != "test_normalization":
pattern.append("test_normalization")
if destination_type.value == DestinationType.SNOWFLAKE.value:
replace_value.append(dbt_test_utils.target_schema.upper())
else:
replace_value.append(dbt_test_utils.target_schema)
if destination_type.value in identifiers_map:
for entry in identifiers_map[destination_type.value]:
for k in entry:
# re.escape() must not be used for the replacement string in sub(), only backslashes should be escaped:
# see https://docs.python.org/3/library/re.html#re.escape
pattern.append(k.replace("\\", r"\\"))
replace_value.append(entry[k])
if pattern and replace_value:
def copy_replace_identifiers(src, dst):
dbt_test_utils.copy_replace(src, dst, pattern, replace_value)
shutil.copytree(src, temp_dir + "/replace", copy_function=copy_replace_identifiers)
src = temp_dir + "/replace"
# final copy
copy_tree(src, dst)
def copy_upper(src, dst):
print(src, "->", dst)
dbt_test_utils.copy_replace(
src,
dst,
pattern=[
r"(- name:) *(.*)",
r"(ref\(')(.*)('\))",
r"(source\(')(.*)('\))",
],
replace_value=[
to_upper_identifier,
to_upper_identifier,
to_upper_identifier,
],
)
def copy_lower(src, dst):
print(src, "->", dst)
dbt_test_utils.copy_replace(
src,
dst,
pattern=[
r"(- name:) *(.*)",
r"(ref\(')(.*)('\))",
r"(source\(')(.*)('\))",
],
replace_value=[
to_lower_identifier,
to_lower_identifier,
to_lower_identifier,
],
)
def to_upper_identifier(input: re.Match) -> str:
if len(input.groups()) == 2:
return f"{input.group(1)} {input.group(2).upper()}"
elif len(input.groups()) == 3:
return f"{input.group(1)}{input.group(2).upper()}{input.group(3)}"
else:
raise Exception(f"Unexpected number of groups in {input}")
def to_lower_identifier(input: re.Match) -> str:
if len(input.groups()) == 2:
return f"{input.group(1)} {input.group(2).lower()}"
elif len(input.groups()) == 3:
return f"{input.group(1)}{input.group(2).lower()}{input.group(3)}"
else:
raise Exception(f"Unexpected number of groups in {input}")
|
py | b411af8a9b23e890a477fd5f3653c08d4f098da4 | import numpy as np
import logging
from copy import copy
from typing import Callable, Any, Tuple, List, Union, Dict
from squirrel.GaussianProcess import GaussianProcess
from squirrel.GaussianProcess.trend import constant_trend
from cma import CMAEvolutionStrategy
from cma.optimization_tools import BestSolution
from ConfigSpace import ConfigurationSpace, Configuration
from ConfigSpace.hyperparameters import UniformFloatHyperparameter, \
UniformIntegerHyperparameter, CategoricalHyperparameter, OrdinalHyperparameter
Vector = List[float]
Matrix = List[Vector]
def handle_box_constraint(x, lb, ub):
"""This function transforms x to t w.r.t. the low and high
boundaries lb and ub. It implements the function T^{r}_{[a,b]} as
described in Rui Li's PhD thesis "Mixed-Integer Evolution Strategies
for Parameter Optimization and Their Applications to Medical Image
Analysis" as alorithm 6.
"""
x = np.asarray(x, dtype='float')
shape_ori = x.shape
x = np.atleast_2d(x)
lb = np.atleast_1d(lb)
ub = np.atleast_1d(ub)
transpose = False
if x.shape[0] != len(lb):
x = x.T
transpose = True
lb, ub = lb.flatten(), ub.flatten()
lb_index = np.isfinite(lb)
up_index = np.isfinite(ub)
valid = np.bitwise_and(lb_index, up_index)
LB = lb[valid][:, np.newaxis]
UB = ub[valid][:, np.newaxis]
y = (x[valid, :] - LB) / (UB - LB)
I = np.mod(np.floor(y), 2) == 0
yprime = np.zeros(y.shape)
yprime[I] = np.abs(y[I] - np.floor(y[I]))
yprime[~I] = 1.0 - np.abs(y[~I] - np.floor(y[~I]))
x[valid, :] = LB + (UB - LB) * yprime
if transpose:
x = x.T
return x.reshape(shape_ori)
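# A quick illustration (values worked out by hand, not part of the library): coordinates outside
# [lb, ub] are reflected back into the box, e.g.
#   handle_box_constraint([1.2, -0.3, 0.5], lb=0.0, ub=1.0)  ->  array([0.8, 0.3, 0.5])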
def vector_to_configspace(cs, vector):
'''Converts a numpy array in [0, 1]^d to a Configuration object.
Works when ``cs`` is a ConfigurationSpace object and each component of the
input vector is in the domain [0, 1].
'''
new_config = cs.sample_configuration()
for i, hyper in enumerate(cs.get_hyperparameters()):
if type(hyper) == OrdinalHyperparameter:
ranges = np.arange(start=0, stop=1, step=1 / len(hyper.sequence))
param_value = hyper.sequence[np.where((vector[i] < ranges) == False)[0][-1]]
elif type(hyper) == CategoricalHyperparameter:
ranges = np.arange(start=0, stop=1, step=1 / len(hyper.choices))
param_value = hyper.choices[np.where((vector[i] < ranges) == False)[0][-1]]
else: # handles UniformFloatHyperparameter & UniformIntegerHyperparameter
# rescaling continuous values
param_value = hyper.lower + (hyper.upper - hyper.lower) * vector[i]
if type(hyper) == UniformIntegerHyperparameter:
param_value = np.round(param_value).astype(int) # converting to discrete (int)
new_config[hyper.name] = param_value
return new_config
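# Rough usage sketch (assumes a ConfigurationSpace with one float hyperparameter; names are illustrative):
#   cs = ConfigurationSpace()
#   cs.add_hyperparameter(UniformFloatHyperparameter("lr", lower=1e-4, upper=1e-1))
#   config = vector_to_configspace(cs, np.array([0.5]))  # maps 0.5 linearly to ~0.05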
class CMA(CMAEvolutionStrategy):
def __init__(
self,
cs,
popsize: int,
lb: Union[float, str, Vector, np.ndarray] = -np.inf,
ub: Union[float, str, Vector, np.ndarray] = np.inf,
ftarget: Union[int, float] = -np.inf,
max_FEs: Union[int, str] = np.inf,
verbose: bool = False,
logger=None
):
"""Wrapper Class for `pycma`
Parameters
----------
cs : ConfigurationSpace
the configuration space defining the search dimensions
popsize : int
population size
lb : Union[float, str, Vector, np.ndarray], optional
lower bound of the decision space, by default -np.inf
ub : Union[float, str, Vector, np.ndarray], optional
upper bound of the decision space, by default np.inf
ftarget : Union[int, float], optional
the target value, by default -np.inf
max_FEs : Union[int, str], optional
the evaluation budget, by default np.inf
verbose : bool, optional
the verbosity, by default False
logger : optional
a logger object, by default None
"""
inopts = {
'bounds': [lb, ub],
'ftarget': ftarget,
'popsize': popsize
}
sigma0 = (ub - lb) / 5
dim = len(cs.get_hyperparameters())
ub = np.array([ub] * dim)
lb = np.array([lb] * dim)
x0 = (ub - lb) * np.random.rand(dim) + lb
super().__init__(x0=x0, sigma0=sigma0, inopts=inopts)
self.dim = dim
self.logger = logger
self.max_FEs = max_FEs
self.ftarget = ftarget
self.verbose = verbose
self.stop_dict = {}
self.cs = cs
def init_with_rh(self, data, **kwargs):
X = np.atleast_2d([
Configuration(values=_[0], configuration_space=self.cs).get_array()\
for _ in data
])
y = np.array([_[1] for _ in data])
dim = X.shape[1]
fopt = np.min(y)
xopt = X[np.where(y == fopt)[0][0]]
mean = constant_trend(dim, beta=None) # Simple Kriging
thetaL = 1e-10 * np.ones(dim)
thetaU = 10 * np.ones(dim)
theta0 = np.random.rand(dim) * (thetaU - thetaL) + thetaL
model = GaussianProcess(
mean=mean, corr='squared_exponential',
theta0=theta0, thetaL=thetaL, thetaU=thetaU,
nugget=1e-6, noise_estim=False,
optimizer='BFGS', wait_iter=5, random_start=5 * dim,
eval_budget=100 * dim
)
model.fit(X, y)
# obtain the Hessian and gradient from the GP mean surface
H = model.Hessian(xopt)
g = model.gradient(xopt)[0]
w, B = np.linalg.eigh(H)
w[w <= 0] = 1e-6 # replace the negative eigenvalues by a very small value
w_min, w_max = np.min(w), np.max(w)
# to avoid the conditional number gets too high
cond_upper = 1e3
delta = (cond_upper * w_min - w_max) / (1 - cond_upper)
w += delta
# compute the upper bound for step-size
M = np.diag(1 / np.sqrt(w)).dot(B.T)
H_inv = B.dot(np.diag(1 / w)).dot(B.T)
p = -1 * H_inv.dot(g).ravel()
alpha = np.linalg.norm(p)
if np.isnan(alpha):
alpha = 1
H_inv = np.eye(dim)
# use a backtracking line search to determine the initial step-size
tau, c = 0.9, 1e-4
slope = np.inner(g.ravel(), p.ravel())
if slope > 0: # this should not happen..
p *= -1
slope *= -1
f = lambda x: model.predict(x)
while True:
_x = (xopt + alpha * p).reshape(1, -1)
if f(_x) <= f(xopt.reshape(1, -1)) + c * alpha * slope:
break
alpha *= tau
sigma0 = np.linalg.norm(M.dot(alpha * p)) / np.sqrt(dim - 0.5)
self.Cov = H_inv
self.sigma = self.sigma0 = sigma0
self._set_x0(xopt)
self.mean = self.gp.geno(
np.array(self.x0, copy=True),
from_bounds=self.boundary_handler.inverse,
copy=False
)
self.mean0 = np.array(self.mean, copy=True)
self.best = BestSolution(x=self.mean, f=fopt)
@property
def eval_count(self):
return self.countevals
@property
def iter_count(self):
return self.countiter
@property
def x(self):
return self.mean
@x.setter
def x(self, x):
self.mean = copy(x)
@property
def Cov(self):
return self.C
@Cov.setter
def Cov(self, C):
try:
w, B = np.linalg.eigh(C)
if np.all(np.isreal(w)):
self.B = self.sm.B = B
self.D = self.sm.D = w ** 0.5
self.dC = np.diag(C)
self.C = self.sm.C = C
self.sm._sortBD()
except np.linalg.LinAlgError:
pass
@property
def logger(self):
return self._logger
@logger.setter
def logger(self, logger):
if isinstance(logger, logging.Logger):
self._logger = logger
self._logger.propagate = False
return
def suggest(self, n_suggestions: int = 1) -> List[Dict]:
try:
_X = super().ask(number=n_suggestions)
self._X = [handle_box_constraint(x, 0, 1) for x in _X]
except Exception as e:
print(e)
return [vector_to_configspace(self.cs, x) for x in self._X]
def observe(self, X, y):
super().tell(self._X, y)
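# Typical ask/tell loop (sketch only; `evaluate(config) -> float` is an assumed external objective):
#   while not opt.check_stop():
#       configs = opt.suggest(n_suggestions=popsize)
#       opt.observe(configs, [evaluate(c) for c in configs])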
def check_stop(self):
_, f, __ = self.best.get()
if f <= self.ftarget:
self.stop_dict['ftarget'] = f
if self.countevals >= self.max_FEs:
self.stop_dict['FEs'] = self.countevals
return bool(self.stop_dict) |
py | b411af9fc5be822a7c38c271034a62dc56019db5 | import logging.config
import matplotlib.pyplot as plt
import os
from utils.utilities import set_log_level, get_project_path, convert_path_image_64
from wordcloud import WordCloud
logging.config.fileConfig("%s/logging.ini" % get_project_path())
set_log_level()
logger = logging.getLogger()
def words_clouds(counter: dict, file: str) -> str:
"""
Create wordcloud
:param counter: dict mapping words to their frequency counts
:param file: path to save the wordcloud
:return: image in base64
"""
wc = WordCloud(background_color="white", scale=2, max_words=40, relative_scaling=0.5,
normalize_plurals=False).generate_from_frequencies(counter)
plt.figure(figsize=(20, 10))
plt.imshow(wc)
plt.axis("off")
plt.savefig(get_project_path() + file)
string_base_64 = convert_path_image_64(get_project_path() + file)
os.remove(get_project_path() + file)
return string_base_64
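# Example call (illustrative; assumes the counts come from e.g. collections.Counter and that the
# path is writable under the project root):
#   img_b64 = words_clouds({"data": 12, "cloud": 7, "python": 5}, "/outputs/wordcloud.png")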
|
py | b411b02f98b63223527cba419ce4831979bc2665 | # Copyright (c) 2021, Serum Studio
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from .password import Password
from .confirm import Confirm
from .input import Input
from .error import error
__all__ = ["Password", "Confirm", "Input", "error"]
|
py | b411b38385bd31b82fc832166881d97c00a4cf9c | #!/usr/bin/env python3
import sys, os
from PyQt5 import uic
from PyQt5.QtCore import (QDir, QStandardPaths, QDirIterator, QFileInfo,
QUrl)
from PyQt5.QtWidgets import QApplication, QMainWindow, QFileDialog
from PyQt5.QtMultimedia import (QMediaPlayer, QMediaPlaylist,
QMediaContent)
class Music(QMainWindow):
def __init__(self, parent=None):
super().__init__()
uic.loadUi(QDir.current().absoluteFilePath('music.ui'), self)
#print(QDir.current().absoluteFilePath('music.ui'))
self.player = QMediaPlayer()
self.playlist = QMediaPlaylist()
self.player.setPlaylist(self.playlist)
self.playMusicBtn.clicked.connect(self.player.play)
self.pauseMusicBtn.clicked.connect(self.player.pause)
self.stopMusicBtn.clicked.connect(self.player.stop)
self.nextMusicBtn.clicked.connect(self.playlist.next)
self.previousMusicButton.clicked.connect(self.playlist.previous)
self.openFilesBtn.clicked.connect(self.openFiles)
self.openDirectoryBtn.clicked.connect(self.openDir)
self.playlist.currentMediaChanged.connect(self.updateLabels)
self.show()
def buildPlayList(self, fileNames):
for name in fileNames:
print(name)
url = QUrl.fromLocalFile(name)
self.playlist.addMedia(QMediaContent(url))
#self.playlist.setPlaybackMode(QMediaPlaylist.Loop)
def openFiles(self):
#fileNames, _ = QFileDialog.getOpenFileNames(self, "Open Files")
music = QStandardPaths.writableLocation(QStandardPaths.MusicLocation)
path, _ = QFileDialog.getOpenFileName(self, "Open file", directory=music,
options=QFileDialog.DontUseNativeDialog)
print(type(path))
print(path)
self.playlist.addMedia(QMediaContent(QUrl.fromLocalFile(path)))
print(type(path))
print(path)
def openDir(self):
#dir _ = QtGui.QFileDialog.getExistingDirectory(None, 'Select a directory:', 'C:\\', QtGui.QFileDialog.ShowDirsOnly)
home = QStandardPaths.writableLocation(QStandardPaths.HomeLocation)
d = QFileDialog.getExistingDirectory(caption="Choose Directory", directory=home,
options=QFileDialog.DontUseNativeDialog)
#print(x)
files = os.listdir(d)
#print(type(files))
for i in files:
path = os.path.join(d, i)
self.playlist.addMedia(QMediaContent(QUrl.fromLocalFile(path)))
#print('{} {}'.format(path, type(i)))
#print(os.listdir(path=x))
#self.buildPlayList(os.listdir(path=x))
songCount = self.playlist.mediaCount()
self.songsLoadedLbl.setText('Songs Loaded = {}'.format(songCount))
def updateLabels(self, media=None):
#print(media)
self.statusBar().showMessage(str(media.canonicalUrl().fileName()))
if __name__ == '__main__':
app = QApplication(sys.argv)
window = Music()
sys.exit(app.exec_())
|
py | b411b3e5fbfd505a0dc134ad4f33291535d660f6 | #!/usr/bin/env python2.7
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" The Heron executor is a process that runs on a container and is responsible for starting and
monitoring the processes of the topology and its support services."""
import argparse
import atexit
import base64
import functools
import json
import os
import random
import signal
import string
import subprocess
import sys
import stat
import threading
import time
import yaml
import socket
import traceback
from heron.common.src.python.utils import log
from heron.common.src.python.utils import proc
# pylint: disable=unused-import,too-many-lines
from heron.proto.packing_plan_pb2 import PackingPlan
from heron.statemgrs.src.python import statemanagerfactory
from heron.statemgrs.src.python import configloader
from heron.statemgrs.src.python.config import Config as StateMgrConfig
Log = log.Log
# pylint: disable=too-many-lines
def print_usage():
print(
"Usage: ./heron-executor --shard=<shardid> --topology-name=<topname>"
" --topology-id=<topid> --topology-defn-file=<topdefnfile>"
" --state-manager-connection=<state_manager_connection>"
" --state-manager-root=<state_manager_root>"
" --state-manager-config-file=<state_manager_config_file>"
" --tmaster-binary=<tmaster_binary>"
" --stmgr-binary=<stmgr_binary> --metrics-manager-classpath=<metricsmgr_classpath>"
" --instance-jvm-opts=<instance_jvm_opts_in_base64> --classpath=<classpath>"
" --master-port=<master_port> --tmaster-controller-port=<tmaster_controller_port>"
" --tmaster-stats-port=<tmaster_stats_port>"
" --heron-internals-config-file=<heron_internals_config_file>"
" --override-config-file=<override_config_file> --component-ram-map=<component_ram_map>"
" --component-jvm-opts=<component_jvm_opts_in_base64> --pkg-type=<pkg_type>"
" --topology-binary-file=<topology_bin_file> --heron-java-home=<heron_java_home>"
" --shell-port=<shell-port> --heron-shell-binary=<heron_shell_binary>"
" --metrics-manager-port=<metricsmgr_port>"
" --cluster=<cluster> --role=<role> --environment=<environ>"
" --instance-classpath=<instance_classpath>"
" --metrics-sinks-config-file=<metrics_sinks_config_file>"
" --scheduler-classpath=<scheduler_classpath> --scheduler-port=<scheduler_port>"
" --python-instance-binary=<python_instance_binary>"
" --metricscache-manager-classpath=<metricscachemgr_classpath>"
" --metricscache-manager-master-port=<metricscachemgr_masterport>"
" --metricscache-manager-stats-port=<metricscachemgr_statsport>"
" --is-stateful=<is_stateful> --checkpoint-manager-classpath=<ckptmgr_classpath>"
" --checkpoint-manager-port=<ckptmgr_port> --checkpoint-manager-ram=<checkpoint_manager_ram>"
" --stateful-config-file=<stateful_config_file>"
" --health-manager-mode=<healthmgr_mode> --health-manager-classpath=<healthmgr_classpath>"
" --cpp-instance-binary=<cpp_instance_binary>"
" --jvm-remote-debugger-ports=<comma_seperated_port_list>")
def id_map(prefix, container_plans, add_zero_id=False):
ids = {}
if add_zero_id:
ids[0] = "%s-0" % prefix
for container_plan in container_plans:
ids[container_plan.id] = "%s-%d" % (prefix, container_plan.id)
return ids
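# For example (assuming container plans with ids 1 and 2):
#   id_map("stmgr", plans)             -> {1: "stmgr-1", 2: "stmgr-2"}
#   id_map("metricsmgr", plans, True)  -> {0: "metricsmgr-0", 1: "metricsmgr-1", 2: "metricsmgr-2"}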
def stmgr_map(container_plans):
return id_map("stmgr", container_plans)
def metricsmgr_map(container_plans):
return id_map("metricsmgr", container_plans, True)
def ckptmgr_map(container_plans):
return id_map("ckptmgr", container_plans, True)
def heron_shell_map(container_plans):
return id_map("heron-shell", container_plans, True)
def get_heron_executor_process_name(shard_id):
return 'heron-executor-%d' % shard_id
def get_process_pid_filename(process_name):
return '%s.pid' % process_name
def get_tmp_filename():
return '%s.heron.tmp' % (''.join(random.choice(string.ascii_uppercase) for i in range(12)))
def atomic_write_file(path, content):
"""
file.write(...) is not atomic.
We write to a tmp file and then rename to target path since rename is atomic.
We do this to avoid the file's content being dirty-read or partially read by others.
"""
# Write to a randomly tmp file
tmp_file = get_tmp_filename()
with open(tmp_file, 'w') as f:
f.write(content)
# make sure that all data is on disk
f.flush()
os.fsync(f.fileno())
# Rename the tmp file
os.rename(tmp_file, path)
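# Note: os.rename is atomic only when source and target live on the same filesystem; that holds for
# the pid files below, since both the tmp file and the target are created in the working directory.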
def log_pid_for_process(process_name, pid):
filename = get_process_pid_filename(process_name)
Log.info('Logging pid %d to file %s' %(pid, filename))
atomic_write_file(filename, str(pid))
def is_docker_environment():
return os.path.isfile('/.dockerenv')
def stdout_log_fn(cmd):
"""Simple function callback that is used to log the streaming output of a subprocess command
:param cmd: the name of the command which will be added to the log line
:return: None
"""
# Log the messages to stdout and strip off the newline because Log.info adds one automatically
return lambda line: Log.info("%s stdout: %s", cmd, line.rstrip('\n'))
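# e.g. stdout_log_fn("heron-tmaster")("starting up\n") would log "heron-tmaster stdout: starting up"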
class ProcessInfo(object):
def __init__(self, process, name, command, attempts=1):
"""
Container for info related to a running process
:param process: the process POpen object
:param name: the logical (i.e., unique) name of the process
:param command: an array of strings comprising the command and it's args
:param attempts: how many times the command has been run (defaults to 1)
"""
self.process = process
self.pid = process.pid
self.name = name
self.command = command
self.command_str = ' '.join(command) # convenience for unit tests
self.attempts = attempts
def increment_attempts(self):
self.attempts += 1
return self
# pylint: disable=too-many-instance-attributes,too-many-statements
class HeronExecutor(object):
""" Heron executor is a class that is responsible for running each of the process on a given
container. Based on the container id and the instance distribution, it determines if the container
is a master node or a worker node and it starts processes accordingly."""
def init_from_parsed_args(self, parsed_args):
""" initialize from parsed arguments """
self.shard = parsed_args.shard
self.topology_name = parsed_args.topology_name
self.topology_id = parsed_args.topology_id
self.topology_defn_file = parsed_args.topology_defn_file
self.state_manager_connection = parsed_args.state_manager_connection
self.state_manager_root = parsed_args.state_manager_root
self.state_manager_config_file = parsed_args.state_manager_config_file
self.tmaster_binary = parsed_args.tmaster_binary
self.stmgr_binary = parsed_args.stmgr_binary
self.metrics_manager_classpath = parsed_args.metrics_manager_classpath
self.metricscache_manager_classpath = parsed_args.metricscache_manager_classpath
# '=' can be parsed in a wrong way by some schedulers (aurora) hence it needs to be escaped.
# It is escaped in two different ways. '(61)' is the new escaping. '&equals;' was
# the original replacement but it is not friendly to bash and is causing issues. The original
# escaping is still left there for reference and backward compatibility purposes (to be
# removed after no topology needs it)
self.instance_jvm_opts =\
base64.b64decode(parsed_args.instance_jvm_opts.lstrip('"').
rstrip('"').replace('(61)', '=').replace('=', '='))
self.classpath = parsed_args.classpath
# Needed for Docker environments since the hostname of a docker container is the container's
# id within docker, rather than the host's hostname. NOTE: this 'HOST' env variable is not
# guaranteed to be set in all Docker executor environments (outside of Marathon)
if is_docker_environment():
self.master_host = os.environ.get('HOST') if 'HOST' in os.environ else socket.gethostname()
else:
self.master_host = socket.gethostname()
self.master_port = parsed_args.master_port
self.tmaster_controller_port = parsed_args.tmaster_controller_port
self.tmaster_stats_port = parsed_args.tmaster_stats_port
self.heron_internals_config_file = parsed_args.heron_internals_config_file
self.override_config_file = parsed_args.override_config_file
self.component_ram_map =\
map(lambda x: {x.split(':')[0]:
int(x.split(':')[1])}, parsed_args.component_ram_map.split(','))
self.component_ram_map =\
functools.reduce(lambda x, y: dict(x.items() + y.items()), self.component_ram_map)
# component_jvm_opts_in_base64 itself is a base64-encoding-json-map, which is appended with
# " at the start and end. It also escapes "=" to "&equals" due to aurora limitation
# And the json is a map from base64-encoding-component-name to base64-encoding-jvm-options
self.component_jvm_opts = {}
# First we need to decode the base64 string back to a json map string.
# '=' can be parsed in a wrong way by some schedulers (aurora) hence it needs to be escaped.
# It is escaped in two different ways. '(61)' is the new escaping. '&equals;' was
# the original replacement but it is not friendly to bash and is causing issues. The original
# escaping is still left there for reference and backward compatibility purposes (to be
# removed after no topology needs it)
component_jvm_opts_in_json =\
base64.b64decode(parsed_args.component_jvm_opts.
lstrip('"').rstrip('"').replace('(61)', '=').replace('=', '='))
if component_jvm_opts_in_json != "":
for (k, v) in json.loads(component_jvm_opts_in_json).items():
# In json, the component name and JVM options are still in base64 encoding
self.component_jvm_opts[base64.b64decode(k)] = base64.b64decode(v)
self.pkg_type = parsed_args.pkg_type
self.topology_binary_file = parsed_args.topology_binary_file
self.heron_java_home = parsed_args.heron_java_home
self.shell_port = parsed_args.shell_port
self.heron_shell_binary = parsed_args.heron_shell_binary
self.metrics_manager_port = parsed_args.metrics_manager_port
self.metricscache_manager_master_port = parsed_args.metricscache_manager_master_port
self.metricscache_manager_stats_port = parsed_args.metricscache_manager_stats_port
self.cluster = parsed_args.cluster
self.role = parsed_args.role
self.environment = parsed_args.environment
self.instance_classpath = parsed_args.instance_classpath
self.metrics_sinks_config_file = parsed_args.metrics_sinks_config_file
self.scheduler_classpath = parsed_args.scheduler_classpath
self.scheduler_port = parsed_args.scheduler_port
self.python_instance_binary = parsed_args.python_instance_binary
self.cpp_instance_binary = parsed_args.cpp_instance_binary
self.is_stateful_topology = (parsed_args.is_stateful.lower() == 'true')
self.checkpoint_manager_classpath = parsed_args.checkpoint_manager_classpath
self.checkpoint_manager_port = parsed_args.checkpoint_manager_port
self.checkpoint_manager_ram = parsed_args.checkpoint_manager_ram
self.stateful_config_file = parsed_args.stateful_config_file
self.metricscache_manager_mode = parsed_args.metricscache_manager_mode \
if parsed_args.metricscache_manager_mode else "disabled"
self.health_manager_mode = parsed_args.health_manager_mode
self.health_manager_classpath = '%s:%s'\
% (self.scheduler_classpath, parsed_args.health_manager_classpath)
self.jvm_remote_debugger_ports = \
parsed_args.jvm_remote_debugger_ports.split(",") \
if parsed_args.jvm_remote_debugger_ports else None
def __init__(self, args, shell_env):
parsed_args = self.parse_args(args)
self.init_from_parsed_args(parsed_args)
self.shell_env = shell_env
self.max_runs = 100
self.interval_between_runs = 10
# Read the heron_internals.yaml for logging dir
self.log_dir = self._load_logging_dir(self.heron_internals_config_file)
# these get set when we call update_packing_plan
self.packing_plan = None
self.stmgr_ids = {}
self.metricsmgr_ids = {}
self.heron_shell_ids = {}
self.ckptmgr_ids = {}
# processes_to_monitor gets set once processes are launched. we need to synchronize rw to this
# dict since is used by multiple threads
self.process_lock = threading.RLock()
self.processes_to_monitor = {}
self.state_managers = []
self.jvm_version = None
@staticmethod
def parse_args(args):
"""Uses python argparse to collect positional args"""
Log.info("Input args: %r" % args)
parser = argparse.ArgumentParser()
parser.add_argument("--shard", type=int, required=True)
parser.add_argument("--topology-name", required=True)
parser.add_argument("--topology-id", required=True)
parser.add_argument("--topology-defn-file", required=True)
parser.add_argument("--state-manager-connection", required=True)
parser.add_argument("--state-manager-root", required=True)
parser.add_argument("--state-manager-config-file", required=True)
parser.add_argument("--tmaster-binary", required=True)
parser.add_argument("--stmgr-binary", required=True)
parser.add_argument("--metrics-manager-classpath", required=True)
parser.add_argument("--instance-jvm-opts", required=True)
parser.add_argument("--classpath", required=True)
parser.add_argument("--master-port", required=True)
parser.add_argument("--tmaster-controller-port", required=True)
parser.add_argument("--tmaster-stats-port", required=True)
parser.add_argument("--heron-internals-config-file", required=True)
parser.add_argument("--override-config-file", required=True)
parser.add_argument("--component-ram-map", required=True)
parser.add_argument("--component-jvm-opts", required=True)
parser.add_argument("--pkg-type", required=True)
parser.add_argument("--topology-binary-file", required=True)
parser.add_argument("--heron-java-home", required=True)
parser.add_argument("--shell-port", required=True)
parser.add_argument("--heron-shell-binary", required=True)
parser.add_argument("--metrics-manager-port", required=True)
parser.add_argument("--cluster", required=True)
parser.add_argument("--role", required=True)
parser.add_argument("--environment", required=True)
parser.add_argument("--instance-classpath", required=True)
parser.add_argument("--metrics-sinks-config-file", required=True)
parser.add_argument("--scheduler-classpath", required=True)
parser.add_argument("--scheduler-port", required=True)
parser.add_argument("--python-instance-binary", required=True)
parser.add_argument("--cpp-instance-binary", required=True)
parser.add_argument("--metricscache-manager-classpath", required=True)
parser.add_argument("--metricscache-manager-master-port", required=True)
parser.add_argument("--metricscache-manager-stats-port", required=True)
parser.add_argument("--metricscache-manager-mode", required=False)
parser.add_argument("--is-stateful", required=True)
parser.add_argument("--checkpoint-manager-classpath", required=True)
parser.add_argument("--checkpoint-manager-port", required=True)
parser.add_argument("--checkpoint-manager-ram", type=long, required=True)
parser.add_argument("--stateful-config-file", required=True)
parser.add_argument("--health-manager-mode", required=True)
parser.add_argument("--health-manager-classpath", required=True)
parser.add_argument("--jvm-remote-debugger-ports", required=False,
help="ports to be used by a remote debugger for JVM instances")
parsed_args, unknown_args = parser.parse_known_args(args[1:])
if unknown_args:
Log.error('Unknown argument: %s' % unknown_args[0])
parser.print_help()
sys.exit(1)
return parsed_args
def run_command_or_exit(self, command):
if self._run_blocking_process(command, True, self.shell_env) != 0:
Log.error("Failed to run command: %s. Exiting" % command)
sys.exit(1)
def initialize(self):
"""
Initialize the environment. Done with a method call outside of the constructor for 2 reasons:
1. Unit tests probably won't want/need to do this
2. We don't initialize the logger (also something unit tests don't want) until after the
constructor
"""
create_folders = 'mkdir -p %s' % self.log_dir
self.run_command_or_exit(create_folders)
chmod_logs_dir = 'chmod a+rx . && chmod a+x %s' % self.log_dir
self.run_command_or_exit(chmod_logs_dir)
chmod_x_binaries = [self.tmaster_binary, self.stmgr_binary, self.heron_shell_binary]
for binary in chmod_x_binaries:
stat_result = os.stat(binary)[stat.ST_MODE]
if not stat_result & stat.S_IXOTH:
chmod_binary = 'chmod +x %s' % binary
self.run_command_or_exit(chmod_binary)
# Log itself pid
log_pid_for_process(get_heron_executor_process_name(self.shard), os.getpid())
def update_packing_plan(self, new_packing_plan):
self.packing_plan = new_packing_plan
self.stmgr_ids = stmgr_map(self.packing_plan.container_plans)
self.ckptmgr_ids = ckptmgr_map(self.packing_plan.container_plans)
self.metricsmgr_ids = metricsmgr_map(self.packing_plan.container_plans)
self.heron_shell_ids = heron_shell_map(self.packing_plan.container_plans)
# pylint: disable=no-self-use
def _load_logging_dir(self, heron_internals_config_file):
with open(heron_internals_config_file, 'r') as stream:
heron_internals_config = yaml.load(stream)
return heron_internals_config['heron.logging.directory']
def _get_metricsmgr_cmd(self, metricsManagerId, sink_config_file, port):
''' get the command to start the metrics manager processes '''
metricsmgr_main_class = 'org.apache.heron.metricsmgr.MetricsManager'
metricsmgr_cmd = [os.path.join(self.heron_java_home, 'bin/java'),
# We could not rely on the default -Xmx setting, which could be very big,
# for instance, the default -Xmx in Twitter mesos machine is around 18GB
'-Xmx1024M',
'-XX:+PrintCommandLineFlags',
'-verbosegc',
'-XX:+PrintGCDetails',
'-XX:+PrintGCTimeStamps',
'-XX:+PrintGCDateStamps',
'-XX:+PrintGCCause',
'-XX:+UseGCLogFileRotation',
'-XX:NumberOfGCLogFiles=5',
'-XX:GCLogFileSize=100M',
'-XX:+PrintPromotionFailure',
'-XX:+PrintTenuringDistribution',
'-XX:+PrintHeapAtGC',
'-XX:+HeapDumpOnOutOfMemoryError',
'-XX:+UseConcMarkSweepGC',
'-XX:+PrintCommandLineFlags',
'-Xloggc:log-files/gc.metricsmgr.log',
'-Djava.net.preferIPv4Stack=true',
'-cp',
self.metrics_manager_classpath,
metricsmgr_main_class,
'--id=' + metricsManagerId,
'--port=' + str(port),
'--topology=' + self.topology_name,
'--cluster=' + self.cluster,
'--role=' + self.role,
'--environment=' + self.environment,
'--topology-id=' + self.topology_id,
'--system-config-file=' + self.heron_internals_config_file,
'--override-config-file=' + self.override_config_file,
'--sink-config-file=' + sink_config_file]
return metricsmgr_cmd
def _get_metrics_cache_cmd(self):
''' get the command to start the metrics manager processes '''
metricscachemgr_main_class = 'org.apache.heron.metricscachemgr.MetricsCacheManager'
metricscachemgr_cmd = [os.path.join(self.heron_java_home, 'bin/java'),
# We could not rely on the default -Xmx setting, which could be very big,
# for instance, the default -Xmx in Twitter mesos machine is around 18GB
'-Xmx1024M',
'-XX:+PrintCommandLineFlags',
'-verbosegc',
'-XX:+PrintGCDetails',
'-XX:+PrintGCTimeStamps',
'-XX:+PrintGCDateStamps',
'-XX:+PrintGCCause',
'-XX:+UseGCLogFileRotation',
'-XX:NumberOfGCLogFiles=5',
'-XX:GCLogFileSize=100M',
'-XX:+PrintPromotionFailure',
'-XX:+PrintTenuringDistribution',
'-XX:+PrintHeapAtGC',
'-XX:+HeapDumpOnOutOfMemoryError',
'-XX:+UseConcMarkSweepGC',
'-XX:+PrintCommandLineFlags',
'-Xloggc:log-files/gc.metricscache.log',
'-Djava.net.preferIPv4Stack=true',
'-cp',
self.metricscache_manager_classpath,
metricscachemgr_main_class,
"--metricscache_id", 'metricscache-0',
"--master_port", self.metricscache_manager_master_port,
"--stats_port", self.metricscache_manager_stats_port,
"--topology_name", self.topology_name,
"--topology_id", self.topology_id,
"--system_config_file", self.heron_internals_config_file,
"--override_config_file", self.override_config_file,
"--sink_config_file", self.metrics_sinks_config_file,
"--cluster", self.cluster,
"--role", self.role,
"--environment", self.environment]
return metricscachemgr_cmd
def _get_healthmgr_cmd(self):
''' get the command to start the topology health manager processes '''
healthmgr_main_class = 'org.apache.heron.healthmgr.HealthManager'
healthmgr_cmd = [os.path.join(self.heron_java_home, 'bin/java'),
# We could not rely on the default -Xmx setting, which could be very big,
# for instance, the default -Xmx in Twitter mesos machine is around 18GB
'-Xmx1024M',
'-XX:+PrintCommandLineFlags',
'-verbosegc',
'-XX:+PrintGCDetails',
'-XX:+PrintGCTimeStamps',
'-XX:+PrintGCDateStamps',
'-XX:+PrintGCCause',
'-XX:+UseGCLogFileRotation',
'-XX:NumberOfGCLogFiles=5',
'-XX:GCLogFileSize=100M',
'-XX:+PrintPromotionFailure',
'-XX:+PrintTenuringDistribution',
'-XX:+PrintHeapAtGC',
'-XX:+HeapDumpOnOutOfMemoryError',
'-XX:+UseConcMarkSweepGC',
'-XX:+PrintCommandLineFlags',
'-Xloggc:log-files/gc.healthmgr.log',
'-Djava.net.preferIPv4Stack=true',
'-cp', self.health_manager_classpath,
healthmgr_main_class,
"--cluster", self.cluster,
"--role", self.role,
"--environment", self.environment,
"--topology_name", self.topology_name,
"--metricsmgr_port", self.metrics_manager_port]
return healthmgr_cmd
def _get_tmaster_processes(self):
''' get the command to start the tmaster processes '''
retval = {}
tmaster_cmd = [
self.tmaster_binary,
'--topology_name=%s' % self.topology_name,
'--topology_id=%s' % self.topology_id,
'--zkhostportlist=%s' % self.state_manager_connection,
'--zkroot=%s' % self.state_manager_root,
'--myhost=%s' % self.master_host,
'--master_port=%s' % str(self.master_port),
'--controller_port=%s' % str(self.tmaster_controller_port),
'--stats_port=%s' % str(self.tmaster_stats_port),
'--config_file=%s' % self.heron_internals_config_file,
'--override_config_file=%s' % self.override_config_file,
'--metrics_sinks_yaml=%s' % self.metrics_sinks_config_file,
'--metricsmgr_port=%s' % str(self.metrics_manager_port),
'--ckptmgr_port=%s' % str(self.checkpoint_manager_port)]
retval["heron-tmaster"] = tmaster_cmd
if self.metricscache_manager_mode.lower() != "disabled":
retval["heron-metricscache"] = self._get_metrics_cache_cmd()
if self.health_manager_mode.lower() != "disabled":
retval["heron-healthmgr"] = self._get_healthmgr_cmd()
retval[self.metricsmgr_ids[0]] = self._get_metricsmgr_cmd(
self.metricsmgr_ids[0],
self.metrics_sinks_config_file,
self.metrics_manager_port)
if self.is_stateful_topology:
retval.update(self._get_ckptmgr_process())
return retval
# Returns the processes for each Java Heron Instance
def _get_java_instance_cmd(self, instance_info):
retval = {}
# TODO(Karthik): to be moved into keys and defaults files
instance_class_name = 'org.apache.heron.instance.HeronInstance'
if self.jvm_remote_debugger_ports and \
(len(instance_info) > len(self.jvm_remote_debugger_ports)):
Log.warn("Not enough remote debugger ports for all instances!")
# Create id to java command map
for (instance_id, component_name, global_task_id, component_index) in instance_info:
# Append debugger ports
remote_debugger_port = None
if self.jvm_remote_debugger_ports:
remote_debugger_port = self.jvm_remote_debugger_ports.pop()
instance_cmd = []
instance_cmd.append(self._get_jvm_instance_cmd()) # JVM command
instance_cmd.extend( # JVM options
self._get_jvm_instance_options(
instance_id, component_name, remote_debugger_port))
instance_cmd.append(instance_class_name) # Class name
instance_cmd.extend( # JVM arguments
self._get_jvm_instance_arguments(
instance_id, component_name, global_task_id, component_index, remote_debugger_port))
retval[instance_id] = instance_cmd
return retval
def _get_jvm_instance_cmd(self):
return os.path.join(self.heron_java_home, 'bin/java')
def _get_jvm_instance_options(self, instance_id, component_name, remote_debugger_port):
code_cache_size_mb = 64
java_metasize_mb = 128
total_jvm_size = int(self.component_ram_map[component_name] / (1024 * 1024))
heap_size_mb = total_jvm_size - code_cache_size_mb - java_metasize_mb
Log.info("component name: %s, RAM request: %d, total JVM size: %dM, "
"cache size: %dM, metaspace size: %dM"
% (component_name, self.component_ram_map[component_name],
total_jvm_size, code_cache_size_mb, java_metasize_mb))
xmn_size = int(heap_size_mb / 2)
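# Worked example (illustrative numbers only): a component RAM request of 1 GiB yields
# total_jvm_size = 1024M, heap_size_mb = 1024 - 64 - 128 = 832M and xmn_size = 416M.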
java_version = self._get_jvm_version()
java_metasize_param = 'MetaspaceSize'
if java_version.startswith("1.7") or \
java_version.startswith("1.6") or \
java_version.startswith("1.5"):
java_metasize_param = 'PermSize'
instance_options = [
'-Xmx%dM' % heap_size_mb,
'-Xms%dM' % heap_size_mb,
'-Xmn%dM' % xmn_size,
'-XX:Max%s=%dM' % (java_metasize_param, java_metasize_mb),
'-XX:%s=%dM' % (java_metasize_param, java_metasize_mb),
'-XX:ReservedCodeCacheSize=%dM' % code_cache_size_mb,
'-XX:+CMSScavengeBeforeRemark',
'-XX:TargetSurvivorRatio=90',
'-XX:+PrintCommandLineFlags',
'-verbosegc',
'-XX:+PrintGCDetails',
'-XX:+PrintGCTimeStamps',
'-XX:+PrintGCDateStamps',
'-XX:+PrintGCCause',
'-XX:+UseGCLogFileRotation',
'-XX:NumberOfGCLogFiles=5',
'-XX:GCLogFileSize=100M',
'-XX:+PrintPromotionFailure',
'-XX:+PrintTenuringDistribution',
'-XX:+PrintHeapAtGC',
'-XX:+HeapDumpOnOutOfMemoryError',
'-XX:+UseConcMarkSweepGC',
'-XX:ParallelGCThreads=4',
'-Xloggc:log-files/gc.%s.log' % instance_id,
'-Djava.net.preferIPv4Stack=true',
'-cp',
'%s:%s'% (self.instance_classpath, self.classpath)]
# Append debugger ports when it is available
if remote_debugger_port:
instance_options.append('-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=%s'
% remote_debugger_port)
# Append user specified jvm options
instance_options.extend(self.instance_jvm_opts.split())
if component_name in self.component_jvm_opts:
instance_options.extend(self.component_jvm_opts[component_name].split())
return instance_options
def _get_jvm_instance_arguments(self, instance_id, component_name, global_task_id,
component_index, remote_debugger_port):
instance_args = [
'-topology_name', self.topology_name,
'-topology_id', self.topology_id,
'-instance_id', instance_id,
'-component_name', component_name,
'-task_id', str(global_task_id),
'-component_index', str(component_index),
'-stmgr_id', self.stmgr_ids[self.shard],
'-stmgr_port', self.tmaster_controller_port,
'-metricsmgr_port', self.metrics_manager_port,
'-system_config_file', self.heron_internals_config_file,
'-override_config_file', self.override_config_file]
# Append debugger ports when it is available
if remote_debugger_port:
instance_args += ['-remote_debugger_port', remote_debugger_port]
return instance_args
def _get_jvm_version(self):
if not self.jvm_version:
cmd = [os.path.join(self.heron_java_home, 'bin/java'),
'-cp', self.instance_classpath, 'org.apache.heron.instance.util.JvmVersion']
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(process_stdout, process_stderr) = process.communicate()
if process.returncode != 0:
Log.error("Failed to determine JVM version. Exiting. Output of %s: %s",
' '.join(cmd), process_stderr)
sys.exit(1)
self.jvm_version = process_stdout
Log.info("Detected JVM version %s" % self.jvm_version)
return self.jvm_version
# Returns the processes for each Python Heron Instance
def _get_python_instance_cmd(self, instance_info):
# pylint: disable=fixme
# TODO: currently ignoring ramsize, heap, etc.
retval = {}
for (instance_id, component_name, global_task_id, component_index) in instance_info:
Log.info("Python instance %s component: %s" %(instance_id, component_name))
instance_cmd = [self.python_instance_binary,
'--topology_name=%s' % self.topology_name,
'--topology_id=%s' % self.topology_id,
'--instance_id=%s' % instance_id,
'--component_name=%s' % component_name,
'--task_id=%s' % str(global_task_id),
'--component_index=%s' % str(component_index),
'--stmgr_id=%s' % self.stmgr_ids[self.shard],
'--stmgr_port=%s' % self.tmaster_controller_port,
'--metricsmgr_port=%s' % self.metrics_manager_port,
'--sys_config=%s' % self.heron_internals_config_file,
'--override_config=%s' % self.override_config_file,
'--topology_pex=%s' % self.topology_binary_file,
'--max_ram=%s' % str(self.component_ram_map[component_name])]
retval[instance_id] = instance_cmd
return retval
# Returns the processes for each CPP Heron Instance
def _get_cpp_instance_cmd(self, instance_info):
# pylint: disable=fixme
# TODO: currently ignoring ramsize, heap, etc.
retval = {}
for (instance_id, component_name, global_task_id, component_index) in instance_info:
Log.info("CPP instance %s component: %s" %(instance_id, component_name))
instance_cmd = [
self.cpp_instance_binary,
'--topology_name=%s' % self.topology_name,
'--topology_id=%s' % self.topology_id,
'--instance_id=%s' % instance_id,
'--component_name=%s' % component_name,
'--task_id=%s' % str(global_task_id),
'--component_index=%s' % str(component_index),
'--stmgr_id=%s' % self.stmgr_ids[self.shard],
'--stmgr_port=%s' % str(self.tmaster_controller_port),
'--metricsmgr_port=%s' % str(self.metrics_manager_port),
'--config_file=%s' % self.heron_internals_config_file,
'--override_config_file=%s' % self.override_config_file,
'--topology_binary=%s' % os.path.abspath(self.topology_binary_file)
]
retval[instance_id] = instance_cmd
return retval
# Returns the processes to handle streams, including the stream-mgr and the user code containing
# the stream logic of the topology
def _get_streaming_processes(self):
'''
Returns the processes to handle streams, including the stream-mgr and the user code containing
the stream logic of the topology
'''
retval = {}
instance_plans = self._get_instance_plans(self.packing_plan, self.shard)
instance_info = []
for instance_plan in instance_plans:
global_task_id = instance_plan.task_id
component_index = instance_plan.component_index
component_name = instance_plan.component_name
instance_id = "container_%s_%s_%d" % (str(self.shard), component_name, global_task_id)
instance_info.append((instance_id, component_name, global_task_id, component_index))
stmgr_cmd = [
self.stmgr_binary,
'--topology_name=%s' % self.topology_name,
'--topology_id=%s' % self.topology_id,
'--topologydefn_file=%s' % self.topology_defn_file,
'--zkhostportlist=%s' % self.state_manager_connection,
'--zkroot=%s' % self.state_manager_root,
'--stmgr_id=%s' % self.stmgr_ids[self.shard],
'--instance_ids=%s' % ','.join(map(lambda x: x[0], instance_info)),
'--myhost=%s' % self.master_host,
'--data_port=%s' % str(self.master_port),
'--local_data_port=%s' % str(self.tmaster_controller_port),
'--metricsmgr_port=%s' % str(self.metrics_manager_port),
'--shell_port=%s' % str(self.shell_port),
'--config_file=%s' % self.heron_internals_config_file,
'--override_config_file=%s' % self.override_config_file,
'--ckptmgr_port=%s' % str(self.checkpoint_manager_port),
'--ckptmgr_id=%s' % self.ckptmgr_ids[self.shard],
'--metricscachemgr_mode=%s' % self.metricscache_manager_mode.lower()]
retval[self.stmgr_ids[self.shard]] = stmgr_cmd
# metricsmgr_metrics_sink_config_file = 'metrics_sinks.yaml'
retval[self.metricsmgr_ids[self.shard]] = self._get_metricsmgr_cmd(
self.metricsmgr_ids[self.shard],
self.metrics_sinks_config_file,
self.metrics_manager_port
)
if self.is_stateful_topology:
retval.update(self._get_ckptmgr_process())
if self.pkg_type == 'jar' or self.pkg_type == 'tar':
retval.update(self._get_java_instance_cmd(instance_info))
elif self.pkg_type == 'pex':
retval.update(self._get_python_instance_cmd(instance_info))
elif self.pkg_type == 'so':
retval.update(self._get_cpp_instance_cmd(instance_info))
elif self.pkg_type == 'dylib':
retval.update(self._get_cpp_instance_cmd(instance_info))
else:
raise ValueError("Unrecognized package type: %s" % self.pkg_type)
return retval
def _get_ckptmgr_process(self):
''' Get the command to start the checkpoint manager process'''
ckptmgr_main_class = 'org.apache.heron.ckptmgr.CheckpointManager'
ckptmgr_ram_mb = self.checkpoint_manager_ram / (1024 * 1024)
ckptmgr_cmd = [os.path.join(self.heron_java_home, "bin/java"),
'-Xms%dM' % ckptmgr_ram_mb,
'-Xmx%dM' % ckptmgr_ram_mb,
'-XX:+PrintCommandLineFlags',
'-verbosegc',
'-XX:+PrintGCDetails',
'-XX:+PrintGCTimeStamps',
'-XX:+PrintGCDateStamps',
'-XX:+PrintGCCause',
'-XX:+UseGCLogFileRotation',
'-XX:NumberOfGCLogFiles=5',
'-XX:GCLogFileSize=100M',
'-XX:+PrintPromotionFailure',
'-XX:+PrintTenuringDistribution',
'-XX:+PrintHeapAtGC',
'-XX:+HeapDumpOnOutOfMemoryError',
'-XX:+UseConcMarkSweepGC',
'-XX:+UseConcMarkSweepGC',
'-Xloggc:log-files/gc.ckptmgr.log',
'-Djava.net.preferIPv4Stack=true',
'-cp',
self.checkpoint_manager_classpath,
ckptmgr_main_class,
'-t' + self.topology_name,
'-i' + self.topology_id,
'-c' + self.ckptmgr_ids[self.shard],
'-p' + self.checkpoint_manager_port,
'-f' + self.stateful_config_file,
'-g' + self.heron_internals_config_file]
retval = {}
retval[self.ckptmgr_ids[self.shard]] = ckptmgr_cmd
return retval
def _get_instance_plans(self, packing_plan, container_id):
"""
For the given packing_plan, return the container plan with the given container_id. If protobufs
supported maps, we could just get the plan by id, but it doesn't so we have a collection of
containers to iterate over.
"""
this_container_plan = None
for container_plan in packing_plan.container_plans:
if container_plan.id == container_id:
this_container_plan = container_plan
# make sure that our shard id is a valid one
assert this_container_plan is not None
return this_container_plan.instance_plans
# Returns the common heron support processes that all containers get, like the heron shell
def _get_heron_support_processes(self):
""" Get a map from all daemon services' name to the command to start them """
retval = {}
retval[self.heron_shell_ids[self.shard]] = [
'%s' % self.heron_shell_binary,
'--port=%s' % self.shell_port,
'--log_file_prefix=%s/heron-shell-%s.log' % (self.log_dir, self.shard),
'--secret=%s' % self.topology_id]
return retval
def _untar_if_needed(self):
if self.pkg_type == "tar":
os.system("tar -xvf %s" % self.topology_binary_file)
elif self.pkg_type == "pex":
os.system("unzip -qq -n %s" % self.topology_binary_file)
# pylint: disable=no-self-use
def _wait_process_std_out_err(self, name, process):
''' Wait for the termination of a process and log its stdout & stderr '''
proc.stream_process_stdout(process, stdout_log_fn(name))
process.wait()
def _run_process(self, name, cmd, env_to_exec=None):
Log.info("Running %s process as %s" % (name, ' '.join(cmd)))
try:
# stderr is redirected to stdout so that it can more easily be logged. stderr has a max buffer
# size and can cause the child process to deadlock if it fills up
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
env=env_to_exec, bufsize=1)
proc.async_stream_process_stdout(process, stdout_log_fn(name))
except Exception:
Log.info("Exception running command %s", cmd)
traceback.print_exc()
return process
def _run_blocking_process(self, cmd, is_shell=False, env_to_exec=None):
Log.info("Running blocking process as %s" % cmd)
try:
# stderr is redirected to stdout so that it can more easily be logged. stderr has a max buffer
# size and can cause the child process to deadlock if it fills up
process = subprocess.Popen(cmd, shell=is_shell, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=env_to_exec)
# wait for termination
self._wait_process_std_out_err(cmd, process)
except Exception:
Log.info("Exception running command %s", cmd)
traceback.print_exc()
# return the exit code
return process.returncode
def _kill_processes(self, commands):
# remove the command from processes_to_monitor and kill the process
with self.process_lock:
for command_name, command in commands.items():
for process_info in self.processes_to_monitor.values():
if process_info.name == command_name:
del self.processes_to_monitor[process_info.pid]
Log.info("Killing %s process with pid %d: %s" %
(process_info.name, process_info.pid, ' '.join(command)))
try:
process_info.process.terminate() # sends SIGTERM to process
except OSError as e:
if e.errno == 3: # No such process
Log.warn("Expected process %s with pid %d was not running, ignoring." %
(process_info.name, process_info.pid))
else:
raise e
def _start_processes(self, commands):
"""Start all commands and add them to the dict of processes to be monitored """
Log.info("Start processes")
processes_to_monitor = {}
# First start all the processes
for (name, command) in commands.items():
p = self._run_process(name, command, self.shell_env)
processes_to_monitor[p.pid] = ProcessInfo(p, name, command)
# Log down the pid file
log_pid_for_process(name, p.pid)
with self.process_lock:
self.processes_to_monitor.update(processes_to_monitor)
def start_process_monitor(self):
""" Monitor all processes in processes_to_monitor dict,
restarting any if they fail, up to max_runs times.
"""
# Now wait for any child to die
Log.info("Start process monitor")
while True:
if len(self.processes_to_monitor) > 0:
(pid, status) = os.wait()
with self.process_lock:
if pid in self.processes_to_monitor.keys():
old_process_info = self.processes_to_monitor[pid]
name = old_process_info.name
command = old_process_info.command
Log.info("%s (pid=%s) exited with status %d. command=%s" % (name, pid, status, command))
# Log the stdout & stderr of the failed process
self._wait_process_std_out_err(name, old_process_info.process)
# Just make it world readable
if os.path.isfile("core.%d" % pid):
os.system("chmod a+r core.%d" % pid)
if old_process_info.attempts >= self.max_runs:
Log.info("%s exited too many times" % name)
sys.exit(1)
time.sleep(self.interval_between_runs)
p = self._run_process(name, command, self.shell_env)
del self.processes_to_monitor[pid]
self.processes_to_monitor[p.pid] =\
ProcessInfo(p, name, command, old_process_info.attempts + 1)
# Log down the pid file
log_pid_for_process(name, p.pid)
def get_commands_to_run(self):
# During shutdown the watch might get triggered with the empty packing plan
if len(self.packing_plan.container_plans) == 0:
return {}
if self.shard == 0:
commands = self._get_tmaster_processes()
else:
self._untar_if_needed()
commands = self._get_streaming_processes()
# Attach daemon processes
commands.update(self._get_heron_support_processes())
return commands
def get_command_changes(self, current_commands, updated_commands):
"""
Compares the current commands with the updated commands and returns a 3-tuple of dicts,
keyed by command name: commands_to_kill, commands_to_keep and commands_to_start.
"""
commands_to_kill = {}
commands_to_keep = {}
commands_to_start = {}
# if the current command has a matching command in the updated commands we keep it
# otherwise we kill it
for current_name, current_command in current_commands.items():
# We don't restart tmaster since it watches the packing plan and updates itself. The stream
# manager is restarted just to reset state, but we could update it to do so without a restart
if current_name in updated_commands.keys() and \
current_command == updated_commands[current_name] and \
not current_name.startswith('stmgr-'):
commands_to_keep[current_name] = current_command
else:
commands_to_kill[current_name] = current_command
# updated commands not in the keep list need to be started
for updated_name, updated_command in updated_commands.items():
if updated_name not in commands_to_keep.keys():
commands_to_start[updated_name] = updated_command
return commands_to_kill, commands_to_keep, commands_to_start
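# Small example (hypothetical command names/values): with
#   current_commands = {"stmgr-1": c1, "metricsmgr-1": c2}
#   updated_commands = {"stmgr-1": c1, "heron-shell-1": c3}
# we get kill = {"stmgr-1": c1, "metricsmgr-1": c2}, keep = {}, start = {"stmgr-1": c1, "heron-shell-1": c3},
# since stream manager commands are always restarted and metricsmgr-1 is no longer requested.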
def launch(self):
''' Determines the commands to be run and compares them with the existing running commands.
Then starts new ones required and kills old ones no longer required.
'''
with self.process_lock:
current_commands = dict(map((lambda process: (process.name, process.command)),
self.processes_to_monitor.values()))
updated_commands = self.get_commands_to_run()
# get the commands to kill, keep and start
commands_to_kill, commands_to_keep, commands_to_start = \
self.get_command_changes(current_commands, updated_commands)
Log.info("current commands: %s" % sorted(current_commands.keys()))
Log.info("new commands : %s" % sorted(updated_commands.keys()))
Log.info("commands_to_kill: %s" % sorted(commands_to_kill.keys()))
Log.info("commands_to_keep: %s" % sorted(commands_to_keep.keys()))
Log.info("commands_to_start: %s" % sorted(commands_to_start.keys()))
self._kill_processes(commands_to_kill)
self._start_processes(commands_to_start)
Log.info("Launch complete - processes killed=%s kept=%s started=%s monitored=%s" %
(len(commands_to_kill), len(commands_to_keep),
len(commands_to_start), len(self.processes_to_monitor)))
# pylint: disable=global-statement
def start_state_manager_watches(self):
"""
Receive updates to the packing plan from the statemgrs and update processes as needed.
"""
Log.info("Start state manager watches")
statemgr_config = StateMgrConfig()
statemgr_config.set_state_locations(configloader.load_state_manager_locations(
self.cluster, state_manager_config_file=self.state_manager_config_file,
overrides={"heron.statemgr.connection.string": self.state_manager_connection}))
try:
self.state_managers = statemanagerfactory.get_all_state_managers(statemgr_config)
for state_manager in self.state_managers:
state_manager.start()
except Exception as ex:
Log.error("Found exception while initializing state managers: %s. Bailing out..." % ex)
traceback.print_exc()
sys.exit(1)
# pylint: disable=unused-argument
def on_packing_plan_watch(state_manager, new_packing_plan):
Log.debug("State watch triggered for PackingPlan update on shard %s. Existing: %s, New: %s" %
(self.shard, str(self.packing_plan), str(new_packing_plan)))
if self.packing_plan != new_packing_plan:
Log.info("PackingPlan change detected on shard %s, relaunching effected processes."
% self.shard)
self.update_packing_plan(new_packing_plan)
Log.info("Updating executor processes")
self.launch()
else:
Log.info(
"State watch triggered for PackingPlan update but plan not changed so not relaunching.")
for state_manager in self.state_managers:
# The callback function with the bound
# state_manager as first variable.
onPackingPlanWatch = functools.partial(on_packing_plan_watch, state_manager)
state_manager.get_packing_plan(self.topology_name, onPackingPlanWatch)
Log.info("Registered state watch for packing plan changes with state manager %s." %
str(state_manager))
def stop_state_manager_watches(self):
Log.info("Stopping state managers")
for state_manager in self.state_managers:
state_manager.stop()
def setup(executor):
"""Set up log, process and signal handlers"""
# pylint: disable=unused-argument
def signal_handler(signal_to_handle, frame):
# Do nothing here beyond exiting: we just catch SIGTERM, and cleanup(), which is
# registered with atexit, is then invoked
Log.info('signal_handler invoked with signal %s', signal_to_handle)
executor.stop_state_manager_watches()
sys.exit(signal_to_handle)
def cleanup():
"""Handler to trigger when receiving the SIGTERM signal
Do cleanup inside this method, including:
1. Terminate all children processes
"""
Log.info('Executor terminated; exiting all processes in executor.')
# We do not wait or check whether the spawned processes have actually died
os.killpg(0, signal.SIGTERM)
# Redirect stdout and stderr to files in append mode
# The filename format is heron-executor-<container_id>.stdxxx
shardid = executor.shard
log.configure(logfile='heron-executor-%s.stdout' % shardid)
pid = os.getpid()
sid = os.getsid(pid)
# POSIX prohibits the change of the process group ID of a session leader
if pid != sid:
Log.info('Set up process group; executor becomes leader')
os.setpgrp() # create new process group, become its leader
Log.info('Register the SIGTERM signal handler')
signal.signal(signal.SIGTERM, signal_handler)
Log.info('Register the atexit clean up')
atexit.register(cleanup)
def start(executor):
"""Set up environment and start executor"""
setup(executor)
# Start state manager watches, which are responsible for monitoring state and
# launching processes
executor.start_state_manager_watches()
# Start the process monitor, which is responsible for restarting processes when
# they die. This is the main loop of the executor
executor.start_process_monitor()
def main():
"""Register exit handlers, initialize the executor and run it."""
# Since Heron on YARN runs as headless users, pex compiled
# binaries should be exploded into the container working
# directory. In order to do this, we need to set the
# PEX_ROOT shell environment variable before forking the processes
shell_env = os.environ.copy()
shell_env["PEX_ROOT"] = os.path.join(os.path.abspath('.'), ".pex")
# Instantiate the executor, bind it to signal handlers and launch it
executor = HeronExecutor(sys.argv, shell_env)
executor.initialize()
start(executor)
if __name__ == "__main__":
main()
|
py | b411b3e7729eb41b452001812160483d25d4c31b | from .dataset import SatelliteDataset, DatasetTransform, MultiTransforms, TransformPCA
from .utility import create_dataloaders, train, seed_everything, test, preprocess_dataset
from .metrics import get_metrics
|
py | b411b45c8c30c09c114b086bd58631e39f4a3065 | import sublime, os
def plugin_loaded():
dpi_settings = sublime.load_settings("Preferences.sublime-settings")
dpi_settings.set("dpi_scale", float(os.environ["UI_SCALE_FACTOR"]))
sublime.save_settings("Preferences.sublime-settings")
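# Usage sketch (assumption, not part of the plugin): launch Sublime Text with the
# environment variable set so plugin_loaded() can read it, e.g.
#   UI_SCALE_FACTOR=2.0 subl
# Note that os.environ["UI_SCALE_FACTOR"] raises KeyError when the variable is unset;
# a more defensive variant would be float(os.environ.get("UI_SCALE_FACTOR", "1.0")).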
|
py | b411b4893248a3c39dabad1ea4771854ad049ba3 | from unittest import mock
from unittest.mock import call
import pytest
from django.test import TestCase
from organisations.models import Organisation, Subscription
@pytest.mark.django_db
class OrganisationTestCase(TestCase):
def test_can_create_organisation_with_and_without_webhook_notification_email(self):
organisation_1 = Organisation.objects.create(name="Test org")
organisation_2 = Organisation.objects.create(
name="Test org with webhook email",
webhook_notification_email="[email protected]",
)
self.assertTrue(organisation_1.name)
self.assertTrue(organisation_2.name)
class SubscriptionTestCase(TestCase):
def setUp(self) -> None:
self.organisation = Organisation.objects.create(name="Test org")
def tearDown(self) -> None:
Subscription.objects.all().delete()
def test_max_seats_set_as_one_if_subscription_has_no_subscription_id(self):
# Given
subscription = Subscription(organisation=self.organisation)
# When
subscription.save()
# Then
assert subscription.max_seats == 1
|
py | b411b5a7d3831e6927b9ac2b0126cbd1a2b93e6f | #!/usr/bin/env python3
import os
from setuptools import setup, find_packages
def get_readme():
return open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
setup(
author="Julio Gonzalez Altamirano",
author_email='[email protected]',
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Internet :: WWW/HTTP',
],
description="Easily post tweets from an image library.",
entry_points={
'console_scripts': [
'goldfinchsong=goldfinchsong.cli:run',
],
},
install_requires=['click', 'tinydb', 'tweepy'],
keywords="twitter api images",
license="MIT",
long_description=get_readme(),
name='goldfinchsong',
packages=find_packages(include=['goldfinchsong', 'goldfinchsong.*'],
exclude=['tests', 'tests.*']),
platforms=['Any'],
url='https://github.com/jga/goldfinchsong',
version='0.1.4',
)
|
py | b411b5b14268711376d8d536c930b47fcc8ecde4 | from ..remote import RemoteModel
class ManagementServerSectionGridRemote(RemoteModel):
"""
| ``DeviceID:`` none
| ``attribute type:`` string
| ``Network:`` none
| ``attribute type:`` string
| ``Collector:`` none
| ``attribute type:`` string
| ``DeviceIPDotted:`` none
| ``attribute type:`` string
| ``DeviceIPNumeric:`` none
| ``attribute type:`` string
| ``DeviceName:`` none
| ``attribute type:`` string
| ``count:`` none
| ``attribute type:`` string
| ``DeviceModel:`` none
| ``attribute type:`` string
| ``DeviceVersion:`` none
| ``attribute type:`` string
| ``DeviceType:`` none
| ``attribute type:`` string
| ``DeviceMAC:`` none
| ``attribute type:`` string
| ``DeviceVendor:`` none
| ``attribute type:`` string
"""
properties = ("DeviceID",
"Network",
"Collector",
"DeviceIPDotted",
"DeviceIPNumeric",
"DeviceName",
"count",
"DeviceModel",
"DeviceVersion",
"DeviceType",
"DeviceMAC",
"DeviceVendor",
)
|
py | b411b60829c807e097a3d9dbe31771b7656e7722 | """app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include('quote.urls'))
]
|
py | b411b61d011838a689c5ca959ee270e74def1969 | import os
import sys
from dateutil.parser import parse
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'repository.settings')
from django.conf import settings
if not hasattr(settings, 'BASE_DIR'): raise Exception('could not load settings.py')
from adapters.certuk_mod.builder import customizations as cert_builder
from edge.generic import EdgeObject
from mongoengine.connection import get_db
def rehash(timestamp):
"""
A script to recalculate all observable data hashes according to CERT requirements (can safely be run multiple times)
"""
page_size = 5000
cert_builder.apply_customizations()
db = get_db()
base_query = {
'type': 'obs',
'data.summary.type': {
'$ne': 'ObservableComposition'
}
}
if timestamp:
base_query.update({'created_on': {
'$gte': timestamp
}})
cursor = db.stix.find(base_query, {'_id': 1})
bulk = db.stix.initialize_unordered_bulk_op()
update_count = 0
def bulk_execute(bulk):
try:
bulk.execute()
except Exception:
pass
return db.stix.initialize_unordered_bulk_op()
for row in cursor:
update_count += 1
stix_id = row['_id']
eo = EdgeObject.load(stix_id)
ao = eo.to_ApiObject()
new_hash = ao.localhash()
bulk.find({
'_id': stix_id,
'data.hash': {'$ne': new_hash}
}).update({
'$set': {
'data.hash': new_hash
}
})
if not update_count % page_size:
bulk = bulk_execute(bulk)
if update_count % page_size:
bulk_execute(bulk)
if __name__ == '__main__':
timestamp = None
args = sys.argv
if len(args) == 2:
try:
timestamp = parse(args[1])
except Exception as e:
raise e
rehash(timestamp)
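# Usage sketch (assumption, not part of the original script; the filename is hypothetical):
# the optional argument is any date/time string that dateutil.parser.parse accepts, and it
# limits the rehash to newer observables, e.g.
#   python rehash.py                # rehash every non-composition observable
#   python rehash.py 2015-06-01     # only observables created on or after 1 June 2015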
|
py | b411b696bdd52c373fb8c1c547132a4290ea6161 | # Copyright (c) 2015 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from typing import Optional, TYPE_CHECKING
from UM.Application import Application
from UM.Math.Polygon import Polygon
from UM.Qt.QtApplication import QtApplication
from UM.Scene.SceneNode import SceneNode
from UM.Resources import Resources
from UM.Math.Color import Color
from UM.Mesh.MeshBuilder import MeshBuilder # To create a mesh to display the convex hull with.
from UM.View.GL.OpenGL import OpenGL
if TYPE_CHECKING:
from UM.Mesh.MeshData import MeshData
class ConvexHullNode(SceneNode):
shader = None # To prevent the shader from being re-built over and over again, only load it once.
## Convex hull node is a special type of scene node that is used to display an area, to indicate the
# location an object uses on the buildplate. This area (or areas in case of one at a time printing) is
# then displayed as a transparent shadow. If the adhesion type is set to raft, the area is extruded
# to represent the raft as well.
def __init__(self, node: SceneNode, hull: Optional[Polygon], thickness: float, parent: Optional[SceneNode] = None) -> None:
super().__init__(parent)
self.setCalculateBoundingBox(False)
self._original_parent = parent
# Color of the drawn convex hull
if not Application.getInstance().getIsHeadLess():
theme = QtApplication.getInstance().getTheme()
if theme:
self._color = Color(*theme.getColor("convex_hull").getRgb())
else:
self._color = Color(0, 0, 0)
else:
self._color = Color(0, 0, 0)
# The y-coordinate of the convex hull mesh. Must not be 0, to prevent z-fighting.
self._mesh_height = 0.1
self._thickness = thickness
# The node this mesh is "watching"
self._node = node
self._convex_hull_head_mesh = None # type: Optional[MeshData]
self._node.decoratorsChanged.connect(self._onNodeDecoratorsChanged)
self._onNodeDecoratorsChanged(self._node)
self._hull = hull
if self._hull:
hull_mesh_builder = MeshBuilder()
if hull_mesh_builder.addConvexPolygonExtrusion(
self._hull.getPoints()[::-1], # bottom layer is reversed
self._mesh_height - thickness, self._mesh_height, color = self._color):
hull_mesh = hull_mesh_builder.build()
self.setMeshData(hull_mesh)
def getHull(self):
return self._hull
def getThickness(self):
return self._thickness
def getWatchedNode(self):
return self._node
def render(self, renderer):
if not ConvexHullNode.shader:
ConvexHullNode.shader = OpenGL.getInstance().createShaderProgram(Resources.getPath(Resources.Shaders, "transparent_object.shader"))
ConvexHullNode.shader.setUniformValue("u_diffuseColor", self._color)
ConvexHullNode.shader.setUniformValue("u_opacity", 0.6)#shade color volume
if self.getParent():
if self.getMeshData() and isinstance(self._node, SceneNode) and self._node.callDecoration("getBuildPlateNumber") == Application.getInstance().getMultiBuildPlateModel().activeBuildPlate:
renderer.queueNode(self, transparent = True, shader = ConvexHullNode.shader, backface_cull = True, sort = -8)
if self._convex_hull_head_mesh:
renderer.queueNode(self, shader = ConvexHullNode.shader, transparent = True, mesh = self._convex_hull_head_mesh, backface_cull = True, sort = -8)
return True
def _onNodeDecoratorsChanged(self, node: SceneNode) -> None:
convex_hull_head = self._node.callDecoration("getConvexHullHead")
if convex_hull_head:
convex_hull_head_builder = MeshBuilder()
convex_hull_head_builder.addConvexPolygon(convex_hull_head.getPoints(), self._mesh_height - self._thickness)
self._convex_hull_head_mesh = convex_hull_head_builder.build()
if not node:
return
|
py | b411b728e9c368585a3b4b3b83fc4936793f9b91 | #
# PySNMP MIB module OMNI-gx2drr3x-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/OMNI-gx2drr3x-MIB
# Produced by pysmi-0.3.4 at Wed May 1 14:33:52 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint")
gx2Drr3x, = mibBuilder.importSymbols("GX2HFC-MIB", "gx2Drr3x")
motproxies, gi = mibBuilder.importSymbols("NLS-BBNIDENT-MIB", "motproxies", "gi")
trapNetworkElemSerialNum, trapNetworkElemAlarmStatus, trapNetworkElemOperState, trapChangedValueInteger, trapNetworkElemAdminState, trapNETrapLastTrapTimeStamp, trapPerceivedSeverity, trapNetworkElemAvailStatus, trapChangedObjectId, trapChangedValueDisplayString, trapNetworkElemModelNumber, trapIdentifier, trapText = mibBuilder.importSymbols("NLSBBN-TRAPS-MIB", "trapNetworkElemSerialNum", "trapNetworkElemAlarmStatus", "trapNetworkElemOperState", "trapChangedValueInteger", "trapNetworkElemAdminState", "trapNETrapLastTrapTimeStamp", "trapPerceivedSeverity", "trapNetworkElemAvailStatus", "trapChangedObjectId", "trapChangedValueDisplayString", "trapNetworkElemModelNumber", "trapIdentifier", "trapText")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
sysUpTime, = mibBuilder.importSymbols("SNMPv2-MIB", "sysUpTime")
Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, Integer32, iso, NotificationType, TimeTicks, MibIdentifier, NotificationType, Bits, ObjectIdentity, Counter64, Counter32, Unsigned32, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "Integer32", "iso", "NotificationType", "TimeTicks", "MibIdentifier", "NotificationType", "Bits", "ObjectIdentity", "Counter64", "Counter32", "Unsigned32", "ModuleIdentity")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
class Float(Counter32):
pass
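# Sketch (assumption, not produced by pysmi and not part of the original MIB): per the
# descriptions below, these Float objects carry IEEE-754 single-precision values packed
# into a 32-bit integer; a management client could reinterpret a raw reading like this.
import struct

def decode_ieee754_float(raw_u32):
    """Reinterpret an unsigned 32-bit SNMP integer value as an IEEE-754 float."""
    return struct.unpack('>f', struct.pack('>I', raw_u32 & 0xFFFFFFFF))[0]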
gx2drr3xDescriptor = MibIdentifier((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 1))
gx2drr3xAnalogTable = MibTable((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2), )
if mibBuilder.loadTexts: gx2drr3xAnalogTable.setStatus('mandatory')
if mibBuilder.loadTexts: gx2drr3xAnalogTable.setDescription('This table contains gx2drr3x specific analog parameters with nominal,limits and current values.')
gx2drr3xAnalogEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1), ).setIndexNames((0, "OMNI-gx2drr3x-MIB", "gx2drr3xAnalogTableIndex"))
if mibBuilder.loadTexts: gx2drr3xAnalogEntry.setStatus('mandatory')
if mibBuilder.loadTexts: gx2drr3xAnalogEntry.setDescription('This list contains the analog parameters and descriptions.')
gx2drr3xDigitalTable = MibTable((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 3), )
if mibBuilder.loadTexts: gx2drr3xDigitalTable.setStatus('mandatory')
if mibBuilder.loadTexts: gx2drr3xDigitalTable.setDescription('This table contains gx2drr3x specific digital parameters with nominal and current values.')
gx2drr3xDigitalEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 3, 2), ).setIndexNames((0, "OMNI-gx2drr3x-MIB", "gx2drr3xDigitalTableIndex"))
if mibBuilder.loadTexts: gx2drr3xDigitalEntry.setStatus('mandatory')
if mibBuilder.loadTexts: gx2drr3xDigitalEntry.setDescription('This list contains digital parameters and descriptions.')
gx2drr3xStatusTable = MibTable((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 4), )
if mibBuilder.loadTexts: gx2drr3xStatusTable.setStatus('mandatory')
if mibBuilder.loadTexts: gx2drr3xStatusTable.setDescription('This table contains gx2drr3x specific status parameters with nominal and current values.')
gx2drr3xStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 4, 3), ).setIndexNames((0, "OMNI-gx2drr3x-MIB", "gx2drr3xStatusTableIndex"))
if mibBuilder.loadTexts: gx2drr3xStatusEntry.setStatus('mandatory')
if mibBuilder.loadTexts: gx2drr3xStatusEntry.setDescription('This list contains Status parameters and descriptions.')
gx2drr3xFactoryTable = MibTable((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 5), )
if mibBuilder.loadTexts: gx2drr3xFactoryTable.setStatus('mandatory')
if mibBuilder.loadTexts: gx2drr3xFactoryTable.setDescription('This table contains gx2drr3x specific factory setting parameters with nominal and current values.')
gx2drr3xFactoryEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 5, 4), ).setIndexNames((0, "OMNI-gx2drr3x-MIB", "gx2drr3xFactoryTableIndex"))
if mibBuilder.loadTexts: gx2drr3xFactoryEntry.setStatus('mandatory')
if mibBuilder.loadTexts: gx2drr3xFactoryEntry.setDescription('This list contains Factory setting parameters and descriptions.')
gx2drr3xAnalogTableIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: gx2drr3xAnalogTableIndex.setStatus('mandatory')
if mibBuilder.loadTexts: gx2drr3xAnalogTableIndex.setDescription('The value of this object identifies the network element. This index is equal to the hfcCommonTableIndex for the same element.')
drrlabelRFAAttenuation = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrlabelRFAAttenuation.setStatus('optional')
if mibBuilder.loadTexts: drrlabelRFAAttenuation.setDescription('The value of this object provides the label of the RF Channel A Attenuation Analog parameter.')
drruomRFAAttenuation = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drruomRFAAttenuation.setStatus('optional')
if mibBuilder.loadTexts: drruomRFAAttenuation.setDescription('The value of this object provides the Unit of Measure of the RF Channel A Attenuation Analog parameter.')
drrmajorHighRFAAttenuation = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 4), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrmajorHighRFAAttenuation.setStatus('mandatory')
if mibBuilder.loadTexts: drrmajorHighRFAAttenuation.setDescription('The value of this object provides the Major High alarm value of the RF Channel A Attenuation Analog parameter. This parameter has not been used by the module and set as the invalid float value (0xFFFFFFFF)')
drrmajorLowRFAAttenuation = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 5), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrmajorLowRFAAttenuation.setStatus('mandatory')
if mibBuilder.loadTexts: drrmajorLowRFAAttenuation.setDescription('The value of this object provides the Major Low alarm value of the RF Channel A Attenuation Analog parameter. This parameter has not been used by the module and set as the invalid float value (0xFFFFFFFF) ')
drrminorHighRFAAttenuation = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 6), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrminorHighRFAAttenuation.setStatus('mandatory')
if mibBuilder.loadTexts: drrminorHighRFAAttenuation.setDescription('The value of this object provides the Minor High alarm value of the RF Channel A Attenuation Analog parameter. This parameter has not been used by the module and set as the invalid float value (0xFFFFFFFF) ')
drrminorLowRFAAttenuation = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 7), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrminorLowRFAAttenuation.setStatus('mandatory')
if mibBuilder.loadTexts: drrminorLowRFAAttenuation.setDescription('The value of this object provides the Minor Low alarm value of the RF Channel A Attenuation Analog parameter. This parameter has not been used by the module and set as the invalid float value (0xFFFFFFFF) ')
drrcurrentValueRFAAttenuation = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 8), Float()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: drrcurrentValueRFAAttenuation.setStatus('mandatory')
if mibBuilder.loadTexts: drrcurrentValueRFAAttenuation.setDescription('The value of this object provides the Current value of the RF Channel A Attenuation Analog parameter. This value is a floating point number that is represented as an IEEE 32 bit number.')
drrstateFlagRFAAttenuation = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("hidden", 1), ("read-only", 2), ("updateable", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrstateFlagRFAAttenuation.setStatus('mandatory')
if mibBuilder.loadTexts: drrstateFlagRFAAttenuation.setDescription('The value of this object provides the state of the RF Channel A Attenuation Analog parameter. (0-Hidden 1-Read-Only, 2-Updateable).')
drrminValueRFAAttenuation = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 10), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrminValueRFAAttenuation.setStatus('mandatory')
if mibBuilder.loadTexts: drrminValueRFAAttenuation.setDescription('The value of this object provides the minimum value the RF Channel A Attenuation Analog parameter can achive. This value is a floating point number that is represented as an IEEE 32 bit number.')
drrmaxValueRFAAttenuation = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 11), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrmaxValueRFAAttenuation.setStatus('mandatory')
if mibBuilder.loadTexts: drrmaxValueRFAAttenuation.setDescription('The value of this object provides the maximum value the RF Channel A Attenuation Analog parameter can achive. This value is a floating point number that is represented as an IEEE 32 bit number.')
drralarmStateRFAAttenuation = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("noAlarm", 1), ("majorLowAlarm", 2), ("minorLowAlarm", 3), ("minorHighAlarm", 4), ("majorHighAlarm", 5), ("informational", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drralarmStateRFAAttenuation.setStatus('mandatory')
if mibBuilder.loadTexts: drralarmStateRFAAttenuation.setDescription('The value of this object provides the current alarm state of the RF Channel A Attenuation Analog parameter.')
drrlabelRFBAttenuation = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 13), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrlabelRFBAttenuation.setStatus('optional')
if mibBuilder.loadTexts: drrlabelRFBAttenuation.setDescription('The value of this object provides the label of the RF Channel B Attenuation Analog parameter.')
drruomRFBAttenuation = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 14), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drruomRFBAttenuation.setStatus('optional')
if mibBuilder.loadTexts: drruomRFBAttenuation.setDescription('The value of this object provides the Unit of Measure of the RF Channel B Attenuation Analog parameter.')
drrmajorHighRFBAttenuation = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 15), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrmajorHighRFBAttenuation.setStatus('mandatory')
if mibBuilder.loadTexts: drrmajorHighRFBAttenuation.setDescription('The value of this object provides the Major High alarm value of the RF Channel B Attenuation Analog parameter. This parameter has not been used by the module and set as the invalid float value (0xFFFFFFFF) ')
drrmajorLowRFBAttenuation = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 16), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrmajorLowRFBAttenuation.setStatus('mandatory')
if mibBuilder.loadTexts: drrmajorLowRFBAttenuation.setDescription('The value of this object provides the Major Low alarm value of the RF Channel B Attenuation Analog parameter. This parameter has not been used by the module and set as the invalid float value (0xFFFFFFFF) ')
drrminorHighRFBAttenuation = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 17), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrminorHighRFBAttenuation.setStatus('mandatory')
if mibBuilder.loadTexts: drrminorHighRFBAttenuation.setDescription('The value of this object provides the Minor High alarm value of the RF Channel B Attenuation Analog parameter. This parameter has not been used by the module and set as the invalid float value (0xFFFFFFFF) ')
drrminorLowRFBAttenuation = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 18), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrminorLowRFBAttenuation.setStatus('mandatory')
if mibBuilder.loadTexts: drrminorLowRFBAttenuation.setDescription('The value of this object provides the Minor Low alarm value of the RF Channel B Attenuation Analog parameter. This parameter has not been used by the module and set as the invalid float value (0xFFFFFFFF) ')
drrcurrentValueRFBAttenuation = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 19), Float()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: drrcurrentValueRFBAttenuation.setStatus('mandatory')
if mibBuilder.loadTexts: drrcurrentValueRFBAttenuation.setDescription('The value of this object provides the Current value of the RF Channel B Attenuation Analog parameter. This value is a floating point number that is represented as an IEEE 32 bit number.')
drrstateFlagRFBAttenuation = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 20), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("hidden", 1), ("read-only", 2), ("updateable", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrstateFlagRFBAttenuation.setStatus('mandatory')
if mibBuilder.loadTexts: drrstateFlagRFBAttenuation.setDescription('The value of this object provides the state of the RF Channel B Attenuation Analog parameter. (0-Hidden 1-Read-Only, 2-Updateable).')
drrminValueRFBAttenuation = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 21), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrminValueRFBAttenuation.setStatus('mandatory')
if mibBuilder.loadTexts: drrminValueRFBAttenuation.setDescription('The value of this object provides the minimum value the RF Channel B Attenuation Analog parameter can achive. This value is a floating point number that is represented as an IEEE 32 bit number.')
drrmaxValueRFBAttenuation = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 22), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrmaxValueRFBAttenuation.setStatus('mandatory')
if mibBuilder.loadTexts: drrmaxValueRFBAttenuation.setDescription('The value of this object provides the maximum value the RF Channel B Attenuation Analog parameter can achive. This value is a floating point number that is represented as an IEEE 32 bit number.')
drralarmStateRFBAttenuation = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 23), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("noAlarm", 1), ("majorLowAlarm", 2), ("minorLowAlarm", 3), ("minorHighAlarm", 4), ("majorHighAlarm", 5), ("informational", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drralarmStateRFBAttenuation.setStatus('mandatory')
if mibBuilder.loadTexts: drralarmStateRFBAttenuation.setDescription('The value of this object provides the current alarm state of the RF Channel B Attenuation Analog parameter.')
drrlabelRFCAttenuation = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 24), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrlabelRFCAttenuation.setStatus('optional')
if mibBuilder.loadTexts: drrlabelRFCAttenuation.setDescription('The value of this object provides the label of the RF Channel C Attenuation Analog parameter.')
drruomRFCAttenuation = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 25), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drruomRFCAttenuation.setStatus('optional')
if mibBuilder.loadTexts: drruomRFCAttenuation.setDescription('The value of this object provides the Unit of Measure of the RF Channel C Attenuation Analog parameter.')
drrmajorHighRFCAttenuation = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 26), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrmajorHighRFCAttenuation.setStatus('mandatory')
if mibBuilder.loadTexts: drrmajorHighRFCAttenuation.setDescription('The value of this object provides the Major High alarm value of the RF Channel C Attenuation Analog parameter. This parameter has not been used by the module and set as the invalid float value (0xFFFFFFFF) ')
drrmajorLowRFCAttenuation = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 27), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrmajorLowRFCAttenuation.setStatus('mandatory')
if mibBuilder.loadTexts: drrmajorLowRFCAttenuation.setDescription('The value of this object provides the Major Low alarm value of the RF Channel C Attenuation Analog parameter. This parameter has not been used by the module and set as the invalid float value (0xFFFFFFFF) ')
drrminorHighRFCAttenuation = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 28), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrminorHighRFCAttenuation.setStatus('mandatory')
if mibBuilder.loadTexts: drrminorHighRFCAttenuation.setDescription('The value of this object provides the Minor High alarm value of the RF Channel C Attenuation Analog parameter. This parameter has not been used by the module and set as the invalid float value (0xFFFFFFFF) ')
drrminorLowRFCAttenuation = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 29), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrminorLowRFCAttenuation.setStatus('mandatory')
if mibBuilder.loadTexts: drrminorLowRFCAttenuation.setDescription('The value of this object provides the Minor Low alarm value of the RF Channel C Attenuation Analog parameter. This parameter has not been used by the module and set as the invalid float value (0xFFFFFFFF) ')
drrcurrentValueRFCAttenuation = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 30), Float()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: drrcurrentValueRFCAttenuation.setStatus('mandatory')
if mibBuilder.loadTexts: drrcurrentValueRFCAttenuation.setDescription('The value of this object provides the Current value of the RF Channel C Attenuation Analog parameter. This value is a floating point number that is represented as an IEEE 32 bit number.')
drrstateFlagRFCAttenuation = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 31), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("hidden", 1), ("read-only", 2), ("updateable", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrstateFlagRFCAttenuation.setStatus('mandatory')
if mibBuilder.loadTexts: drrstateFlagRFCAttenuation.setDescription('The value of this object provides the state of the RF Channel C Attenuation Analog parameter. (0-Hidden 1-Read-Only, 2-Updateable).')
drrminValueRFCAttenuation = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 32), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrminValueRFCAttenuation.setStatus('mandatory')
if mibBuilder.loadTexts: drrminValueRFCAttenuation.setDescription('The value of this object provides the minimum value the RF Channel C Attenuation Analog parameter can achive. This value is a floating point number that is represented as an IEEE 32 bit number.')
drrmaxValueRFCAttenuation = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 33), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrmaxValueRFCAttenuation.setStatus('mandatory')
if mibBuilder.loadTexts: drrmaxValueRFCAttenuation.setDescription('The value of this object provides the maximum value the RF Channel C Attenuation Analog parameter can achive. This value is a floating point number that is represented as an IEEE 32 bit number.')
drralarmStateRFCAttenuation = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 34), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("noAlarm", 1), ("majorLowAlarm", 2), ("minorLowAlarm", 3), ("minorHighAlarm", 4), ("majorHighAlarm", 5), ("informational", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drralarmStateRFCAttenuation.setStatus('mandatory')
if mibBuilder.loadTexts: drralarmStateRFCAttenuation.setDescription('The value of this object provides the current alarm state of the RF Channel C Attenuation Analog parameter.')
drrlabelTrippointLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 35), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrlabelTrippointLevel.setStatus('optional')
if mibBuilder.loadTexts: drrlabelTrippointLevel.setDescription('The value of this object provides the label of the trip point level Analog parameter.')
drruomTrippointLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 36), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drruomTrippointLevel.setStatus('optional')
if mibBuilder.loadTexts: drruomTrippointLevel.setDescription('The value of this object provides the Unit of Measure of the trip point level Analog parameter.')
drrmajorHighTrippointLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 37), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrmajorHighTrippointLevel.setStatus('mandatory')
if mibBuilder.loadTexts: drrmajorHighTrippointLevel.setDescription('The value of this object provides the Major High alarm value of the trip point level Analog parameter. This parameter has not been used by the module and set as the invalid float value (0xFFFFFFFF) ')
drrmajorLowTrippointLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 38), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrmajorLowTrippointLevel.setStatus('mandatory')
if mibBuilder.loadTexts: drrmajorLowTrippointLevel.setDescription('The value of this object provides the Major Low alarm value of the trip point level Analog parameter. This parameter has not been used by the module and set as the invalid float value (0xFFFFFFFF) ')
drrminorHighTrippointLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 39), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrminorHighTrippointLevel.setStatus('mandatory')
if mibBuilder.loadTexts: drrminorHighTrippointLevel.setDescription('The value of this object provides the Minor High alarm value of the trip point level Analog parameter. This parameter has not been used by the module and set as the invalid float value (0xFFFFFFFF) ')
drrminorLowTrippointLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 40), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrminorLowTrippointLevel.setStatus('mandatory')
if mibBuilder.loadTexts: drrminorLowTrippointLevel.setDescription('The value of this object provides the Minor Low alarm value of the trip point level Analog parameter. This parameter has not been used by the module and set as the invalid float value (0xFFFFFFFF) ')
drrcurrentValueTrippointLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 41), Float()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: drrcurrentValueTrippointLevel.setStatus('mandatory')
if mibBuilder.loadTexts: drrcurrentValueTrippointLevel.setDescription('The value of this object provides the Current value of the trippoint level Analog parameter. The trip point level set the custumer preferred optical input level. This parameter combined with trippoint mode(inside digital table) decides the trippoint alarm and RF channel output impedance. when optical power level is lower than trip point level(Set by this parameter) 1) When trip point mode is ALARM_SWITCH, trip point alarm will be generated and RF output impedance will be changed to high impedance. 2) When trippoint mode is ALARM_ONLY, trippoint alarm will be generated but RF output inpedance remains to be low impedance. This trip point level value is a floating point number that is represented as an IEEE 32 bit number.')
drrstateFlagTrippointLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 42), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("hidden", 1), ("read-only", 2), ("updateable", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrstateFlagTrippointLevel.setStatus('mandatory')
if mibBuilder.loadTexts: drrstateFlagTrippointLevel.setDescription('The value of this object provides the state of the trip point level Analog parameter. (0-Hidden 1-Read-Only, 2-Updateable).')
drrminValueTrippointLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 43), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrminValueTrippointLevel.setStatus('mandatory')
if mibBuilder.loadTexts: drrminValueTrippointLevel.setDescription('The value of this object provides the minimum value the trip point level Analog parameter can achive. This value is a floating point number that is represented as an IEEE 32 bit number.')
drrmaxValueTrippointLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 44), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrmaxValueTrippointLevel.setStatus('mandatory')
if mibBuilder.loadTexts: drrmaxValueTrippointLevel.setDescription('The value of this object provides the maximum value the trip point level Analog parameter can achive. This value is a floating point number that is represented as an IEEE 32 bit number.')
drralarmStateTrippointLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 45), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("noAlarm", 1), ("majorLowAlarm", 2), ("minorLowAlarm", 3), ("minorHighAlarm", 4), ("majorHighAlarm", 5), ("informational", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drralarmStateTrippointLevel.setStatus('mandatory')
if mibBuilder.loadTexts: drralarmStateTrippointLevel.setDescription('The value of this object provides the current alarm state of the trip point level Analog parameter.')
drrlabelOptCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 46), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrlabelOptCurrent.setStatus('optional')
if mibBuilder.loadTexts: drrlabelOptCurrent.setDescription('The value of this object provides the label of the Optical Power Current Analog parameter.')
drruomOptCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 47), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drruomOptCurrent.setStatus('optional')
if mibBuilder.loadTexts: drruomOptCurrent.setDescription('The value of this object provides the Unit of Measure of the Optical Power Current Analog parameter.')
drrmajorHighOptCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 48), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrmajorHighOptCurrent.setStatus('mandatory')
if mibBuilder.loadTexts: drrmajorHighOptCurrent.setDescription('The value of this object provides the Major High alarm value of the Optical Power Current Analog parameter. This value is a floating point number that is represented as an IEEE 32 bit number.')
drrmajorLowOptCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 49), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrmajorLowOptCurrent.setStatus('mandatory')
if mibBuilder.loadTexts: drrmajorLowOptCurrent.setDescription('The value of this object provides the Major Low alarm value of the Optical Power Current Analog parameter. This value is a floating point number that is represented as an IEEE 32 bit number.')
drrminorHighOptCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 50), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrminorHighOptCurrent.setStatus('mandatory')
if mibBuilder.loadTexts: drrminorHighOptCurrent.setDescription('The value of this object provides the Minor High alarm value of the Optical Power Current Analog parameter. This parameter has not been used by the module and set as the invalid float value (0xFFFFFFFF) ')
drrminorLowOptCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 51), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrminorLowOptCurrent.setStatus('mandatory')
if mibBuilder.loadTexts: drrminorLowOptCurrent.setDescription('The value of this object provides the Minor Low alarm value of the Optical Power Current Analog parameter. This parameter has not been used by the module and set as the invalid float value (0xFFFFFFFF) ')
drrcurrentValueOptCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 52), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrcurrentValueOptCurrent.setStatus('mandatory')
if mibBuilder.loadTexts: drrcurrentValueOptCurrent.setDescription('The value of this object provides the Current value of the Optical Power Current Analog parameter. This value is a floating point number that is represented as an IEEE 32 bit number.')
drrstateFlagOptCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 53), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("hidden", 1), ("read-only", 2), ("updateable", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrstateFlagOptCurrent.setStatus('mandatory')
if mibBuilder.loadTexts: drrstateFlagOptCurrent.setDescription('The value of this object provides the state of the Optical Power Current Analog parameter. (0-Hidden 1-Read-Only, 2-Updateable).')
drrminValueOptCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 54), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrminValueOptCurrent.setStatus('mandatory')
if mibBuilder.loadTexts: drrminValueOptCurrent.setDescription('The value of this object provides the minimum value the Optical Power Current Analog parameter can achive. This value is a floating point number that is represented as an IEEE 32 bit number. The input optical power in lower than this value, drrcurrentValueOptCurrent will report -99dBm')
drrmaxValueOptCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 55), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrmaxValueOptCurrent.setStatus('mandatory')
if mibBuilder.loadTexts: drrmaxValueOptCurrent.setDescription('The value of this object provides the maximum value the Optical Power Current Analog parameter can achive. This value is a floating point number that is represented as an IEEE 32 bit number. The input optical power in higher than this value, drrcurrentValueOptCurrent will report 99dBm')
drralarmStateOptCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 56), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("noAlarm", 1), ("majorLowAlarm", 2), ("minorLowAlarm", 3), ("minorHighAlarm", 4), ("majorHighAlarm", 5), ("informational", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drralarmStateOptCurrent.setStatus('mandatory')
if mibBuilder.loadTexts: drralarmStateOptCurrent.setDescription('The value of this object provides the current alarm state of the Optical Power Current Analog parameter.')
drrlabel12VCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 57), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrlabel12VCurrent.setStatus('optional')
if mibBuilder.loadTexts: drrlabel12VCurrent.setDescription('The value of this object provides the label of the 12v power supply Current Analog parameter.')
drruom12VCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 58), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drruom12VCurrent.setStatus('optional')
if mibBuilder.loadTexts: drruom12VCurrent.setDescription('The value of this object provides the Unit of Measure of the 12v power supply Current Analog parameter.')
drrmajorHigh12VCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 59), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrmajorHigh12VCurrent.setStatus('mandatory')
if mibBuilder.loadTexts: drrmajorHigh12VCurrent.setDescription('The value of this object provides the Major High alarm value of the 12v power supply Current Analog parameter. This value is a floating point number that is represented as an IEEE 32 bit number.')
drrmajorLow12VCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 60), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrmajorLow12VCurrent.setStatus('mandatory')
if mibBuilder.loadTexts: drrmajorLow12VCurrent.setDescription('The value of this object provides the Major Low alarm value of the 12v power supply Current Analog parameter. This value is a floating point number that is represented as an IEEE 32 bit number.')
drrminorHigh12VCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 61), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrminorHigh12VCurrent.setStatus('mandatory')
if mibBuilder.loadTexts: drrminorHigh12VCurrent.setDescription('The value of this object provides the Minor High alarm value of the 12v power supply Current Analog parameter. This value is a floating point number that is represented as an IEEE 32 bit number.')
drrminorLow12VCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 62), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrminorLow12VCurrent.setStatus('mandatory')
if mibBuilder.loadTexts: drrminorLow12VCurrent.setDescription('The value of this object provides the Minor Low alarm value of the 12v power supply Current Analog parameter. This value is a floating point number that is represented as an IEEE 32 bit number.')
drrcurrentValue12VCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 63), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrcurrentValue12VCurrent.setStatus('mandatory')
if mibBuilder.loadTexts: drrcurrentValue12VCurrent.setDescription('The value of this object provides the Current value of the 12v power supply Current parameter. This value is a floating point number that is represented as an IEEE 32 bit number.')
drrstateFlag12VCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 64), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("hidden", 1), ("read-only", 2), ("updateable", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrstateFlag12VCurrent.setStatus('mandatory')
if mibBuilder.loadTexts: drrstateFlag12VCurrent.setDescription('The value of this object provides the state of the 12v power supply Current Analog parameter. (0-Hidden 1-Read-Only, 2-Updateable).')
drrminValue12VCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 65), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrminValue12VCurrent.setStatus('mandatory')
if mibBuilder.loadTexts: drrminValue12VCurrent.setDescription('The value of this object provides the minimum value the 12v power supply Current Analog parameter can achive. This value is a floating point number that is represented as an IEEE 32 bit number.')
drrmaxValue12VCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 66), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrmaxValue12VCurrent.setStatus('mandatory')
if mibBuilder.loadTexts: drrmaxValue12VCurrent.setDescription('The value of this object provides the maximum value the 12v power supply Current Analog parameter can achive. This value is a floating point number that is represented as an IEEE 32 bit number.')
drralarmState12VCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 67), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("noAlarm", 1), ("majorLowAlarm", 2), ("minorLowAlarm", 3), ("minorHighAlarm", 4), ("majorHighAlarm", 5), ("informational", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drralarmState12VCurrent.setStatus('mandatory')
if mibBuilder.loadTexts: drralarmState12VCurrent.setDescription('The value of this object provides the current alarm state of the 12v power supply Current Analog parameter.')
drrlabelModTemp = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 68), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrlabelModTemp.setStatus('optional')
if mibBuilder.loadTexts: drrlabelModTemp.setDescription('The value of this object provides the label of the Module Temperature Analog parameter.')
drruomModTemp = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 69), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drruomModTemp.setStatus('optional')
if mibBuilder.loadTexts: drruomModTemp.setDescription('The value of this object provides the Unit of Measure of the Module Temperature Analog parameter.')
drrmajorHighModTemp = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 70), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrmajorHighModTemp.setStatus('mandatory')
if mibBuilder.loadTexts: drrmajorHighModTemp.setDescription('The value of this object provides the Major High alarm value of the Module Temperature Analog parameter. This value is a floating point number that is represented as an IEEE 32 bit number.')
drrmajorLowModTemp = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 71), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrmajorLowModTemp.setStatus('mandatory')
if mibBuilder.loadTexts: drrmajorLowModTemp.setDescription('The value of this object provides the Major Low alarm value of the Module Temperature Analog parameter. This value is a floating point number that is represented as an IEEE 32 bit number.')
drrminorHighModTemp = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 72), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrminorHighModTemp.setStatus('mandatory')
if mibBuilder.loadTexts: drrminorHighModTemp.setDescription('The value of this object provides the Minor High alarm value of the Module Temperature Analog parameter. This value is a floating point number that is represented as an IEEE 32 bit number.')
drrminorLowModTemp = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 73), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrminorLowModTemp.setStatus('mandatory')
if mibBuilder.loadTexts: drrminorLowModTemp.setDescription('The value of this object provides the Minor Low alarm value of the Module Temperature Analog parameter. This value is a floating point number that is represented as an IEEE 32 bit number.')
drrcurrentValueModTemp = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 74), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrcurrentValueModTemp.setStatus('mandatory')
if mibBuilder.loadTexts: drrcurrentValueModTemp.setDescription('The value of this object provides the Current value of the Module Temperature Analog parameter. This value is a floating point number that is represented as an IEEE 32 bit number.')
drrstateFlagModTemp = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 75), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("hidden", 1), ("read-only", 2), ("updateable", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrstateFlagModTemp.setStatus('mandatory')
if mibBuilder.loadTexts: drrstateFlagModTemp.setDescription('The value of this object provides the state of the Module Temperature Analog parameter. (0-Hidden 1-Read-Only, 2-Updateable).')
drrminValueModTemp = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 76), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrminValueModTemp.setStatus('mandatory')
if mibBuilder.loadTexts: drrminValueModTemp.setDescription('The value of this object provides the minimum value the Module Temperature Analog parameter can achive. This value is a floating point number that is represented as an IEEE 32 bit number.')
drrmaxValueModTemp = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 77), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrmaxValueModTemp.setStatus('mandatory')
if mibBuilder.loadTexts: drrmaxValueModTemp.setDescription('The value of this object provides the maximum value the Module Temperature Analog parameter can achive. This value is a floating point number that is represented as an IEEE 32 bit number.')
drralarmStateModTemp = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 78), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("noAlarm", 1), ("majorLowAlarm", 2), ("minorLowAlarm", 3), ("minorHighAlarm", 4), ("majorHighAlarm", 5), ("informational", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drralarmStateModTemp.setStatus('mandatory')
if mibBuilder.loadTexts: drralarmStateModTemp.setDescription('The value of this object provides the current alarm state of the Module Temperature Analog parameter.')
drrlabelFanCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 79), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrlabelFanCurrent.setStatus('optional')
if mibBuilder.loadTexts: drrlabelFanCurrent.setDescription('The value of this object provides the label of the Fan Current Analog parameter.')
drruomFanCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 80), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drruomFanCurrent.setStatus('optional')
if mibBuilder.loadTexts: drruomFanCurrent.setDescription('The value of this object provides the Unit of Measure of the Fan Current Analog parameter.')
drrmajorHighFanCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 81), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrmajorHighFanCurrent.setStatus('mandatory')
if mibBuilder.loadTexts: drrmajorHighFanCurrent.setDescription('The value of this object provides the Major High alarm value of the Fan Current Analog parameter. This value is a floating point number that is represented as an IEEE 32 bit number.')
drrmajorLowFanCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 82), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrmajorLowFanCurrent.setStatus('mandatory')
if mibBuilder.loadTexts: drrmajorLowFanCurrent.setDescription('The value of this object provides the Major Low alarm value of the Fan Current Analog parameter. This value is a floating point number that is represented as an IEEE 32 bit number.')
drrminorHighFanCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 83), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrminorHighFanCurrent.setStatus('mandatory')
if mibBuilder.loadTexts: drrminorHighFanCurrent.setDescription('The value of this object provides the Minor High alarm value of the Fan Current Analog parameter. This parameter is not used by the module and is set to the invalid float value (0xFFFFFFFF).')
drrminorLowFanCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 84), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrminorLowFanCurrent.setStatus('mandatory')
if mibBuilder.loadTexts: drrminorLowFanCurrent.setDescription('The value of this object provides the Minor Low alarm value of the Fan Current Analog parameter. This parameter is not used by the module and is set to the invalid float value (0xFFFFFFFF).')
drrcurrentValueFanCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 85), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrcurrentValueFanCurrent.setStatus('mandatory')
if mibBuilder.loadTexts: drrcurrentValueFanCurrent.setDescription('The value of this object provides the Current value of the Fan Current Analog parameter. This value is a floating point number that is represented as an IEEE 32 bit number.')
drrstateFlagFanCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 86), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("hidden", 1), ("read-only", 2), ("updateable", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrstateFlagFanCurrent.setStatus('mandatory')
if mibBuilder.loadTexts: drrstateFlagFanCurrent.setDescription('The value of this object provides the state of the Fan Current Analog parameter. (1-Hidden, 2-Read-Only, 3-Updateable).')
drrminValueFanCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 87), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrminValueFanCurrent.setStatus('mandatory')
if mibBuilder.loadTexts: drrminValueFanCurrent.setDescription('The value of this object provides the minimum value the Fan Current Analog parameter can achieve. This value is a floating point number that is represented as an IEEE 32 bit number.')
drrmaxValueFanCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 88), Float()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrmaxValueFanCurrent.setStatus('mandatory')
if mibBuilder.loadTexts: drrmaxValueFanCurrent.setDescription('The value of this object provides the maximum value the Fan Current Analog parameter can achieve. This value is a floating point number that is represented as an IEEE 32 bit number.')
drralarmStateFanCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 2, 1, 89), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("noAlarm", 1), ("majorLowAlarm", 2), ("minorLowAlarm", 3), ("minorHighAlarm", 4), ("majorHighAlarm", 5), ("informational", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drralarmStateFanCurrent.setStatus('mandatory')
if mibBuilder.loadTexts: drralarmStateFanCurrent.setDescription('The value of this object provides the current alarm state of the Fan Current Analog parameter.')
gx2drr3xDigitalTableIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 3, 2, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: gx2drr3xDigitalTableIndex.setStatus('mandatory')
if mibBuilder.loadTexts: gx2drr3xDigitalTableIndex.setDescription('The value of this object identifies the network element. This index is equal to the hfcCommonTableIndex for the same element.')
drrlabelTrippointMode = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 3, 2, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrlabelTrippointMode.setStatus('optional')
if mibBuilder.loadTexts: drrlabelTrippointMode.setDescription('The value of this object provides the label of the Trip point mode Digital parameter. The trip point mode defines the RF switch action based on the optical power input to the DRR3X')
drrenumTrippointMode = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 3, 2, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrenumTrippointMode.setStatus('optional')
if mibBuilder.loadTexts: drrenumTrippointMode.setDescription('The value of this object represents the enumeration values possible for the Digital parameter. Each enumerated value is separated by a comma.')
drrvalueTrippointMode = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 3, 2, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("off", 1), ("alarmOnly", 2), ("alarm-and-Switch", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: drrvalueTrippointMode.setStatus('mandatory')
if mibBuilder.loadTexts: drrvalueTrippointMode.setDescription('The value of this object is the current value of the trip point mode. There are three possible values: 1-off, 2-alarm_only, 3-alarm&switch. If the module input optical power is higher than the preset trip point value, there is no operational difference between these three modes. If the module input optical power is less than the preset trip point value: 1) When the trip point mode is OFF, no trip point alarm is generated and the RF output impedance stays at its normal low value. 2) When the trip point mode is ALARM_ONLY, a trip point alarm is generated when the input optical power is lower than the preset trip point level, but the RF output impedance stays at its normal low value. 3) When the trip point mode is ALARM_SWITCH, a trip point alarm is generated and the RF output impedance switches to the abnormal high-impedance state.')
drrstateflagTrippointMode = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 3, 2, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("hidden", 1), ("readOnly", 2), ("updateable", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrstateflagTrippointMode.setStatus('mandatory')
if mibBuilder.loadTexts: drrstateflagTrippointMode.setDescription('The value of this object provides the state of the parameter. (1-Hidden, 2-Read-Only, 3-Updateable).')
drrlabelFrontPanelTest = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 3, 2, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrlabelFrontPanelTest.setStatus('optional')
if mibBuilder.loadTexts: drrlabelFrontPanelTest.setDescription('The value of this object provides the label of the Front panel test point Digital parameter.')
drrenumFrontPanelTest = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 3, 2, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrenumFrontPanelTest.setStatus('optional')
if mibBuilder.loadTexts: drrenumFrontPanelTest.setDescription('The value of this object represents the enumeration values possible for the Digital parameter. Each enumerated value is separated by a comma. The first value has an enumerated value of 1.')
drrvalueFrontPanelTest = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 3, 2, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("chanA", 1), ("chanB", 2), ("chanC", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: drrvalueFrontPanelTest.setStatus('mandatory')
if mibBuilder.loadTexts: drrvalueFrontPanelTest.setDescription('The value of this object is the current value of Front panel RF test point Channel number')
drrstateflagFrontPanelTest = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 3, 2, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("hidden", 1), ("readOnly", 2), ("updateable", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrstateflagFrontPanelTest.setStatus('mandatory')
if mibBuilder.loadTexts: drrstateflagFrontPanelTest.setDescription('The value of this object provides the state of the parameter. (1-Hidden, 2-Read-Only, 3-Updateable).')
drrlabelFactoryDefaultReset = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 3, 2, 10), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrlabelFactoryDefaultReset.setStatus('optional')
if mibBuilder.loadTexts: drrlabelFactoryDefaultReset.setDescription('The value of this object provides the label of the Factory Default Reset Digital parameter.')
drrenumFactoryDefaultReset = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 3, 2, 11), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrenumFactoryDefaultReset.setStatus('optional')
if mibBuilder.loadTexts: drrenumFactoryDefaultReset.setDescription('The value of this object represents the enumeration values possible for the Digital parameter. Each enumerated value is separated by a comma. The first value has an enumerated value of 1.')
drrvalueFactoryDefaultReset = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 3, 2, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("off", 1), ("on", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: drrvalueFactoryDefaultReset.setStatus('mandatory')
if mibBuilder.loadTexts: drrvalueFactoryDefaultReset.setDescription('A read of this value will return an invalid value. The read will not reflect the actual setting of this parameter.')
drrstateflagFactoryDefaultReset = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 3, 2, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("hidden", 1), ("read-only", 2), ("updateable", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrstateflagFactoryDefaultReset.setStatus('mandatory')
if mibBuilder.loadTexts: drrstateflagFactoryDefaultReset.setDescription('The value of this object provides the state of the parameter. (1-Hidden, 2-Read-Only, 3-Updateable).')
gx2drr3xStatusTableIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 4, 3, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: gx2drr3xStatusTableIndex.setStatus('mandatory')
if mibBuilder.loadTexts: gx2drr3xStatusTableIndex.setDescription('The value of this object identifies the network element. This index is equal to the hfcCommonTableIndex for the same element.')
drrlabelBoot = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 4, 3, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrlabelBoot.setStatus('optional')
if mibBuilder.loadTexts: drrlabelBoot.setDescription('The value of this object provides the label of the Boot Status parameter.')
drrvalueBoot = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 4, 3, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("ok", 1), ("undetermined", 2), ("warning", 3), ("minor", 4), ("major", 5), ("critical", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrvalueBoot.setStatus('mandatory')
if mibBuilder.loadTexts: drrvalueBoot.setDescription('The value of this object provides the current state of the parameter (1-Ok, 2-Undetermined, 3-Warning, 4-Minor, 5-Major, 6-Critical).')
drrstateflagBoot = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 4, 3, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("hidden", 1), ("read-only", 2), ("updateable", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrstateflagBoot.setStatus('mandatory')
if mibBuilder.loadTexts: drrstateflagBoot.setDescription('The value of this object provides the state of the parameter. (1-Hidden, 2-Read-Only, 3-Updateable).')
drrlabelFlash = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 4, 3, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrlabelFlash.setStatus('optional')
if mibBuilder.loadTexts: drrlabelFlash.setDescription('The value of this object provides the label of the Flash Status parameter.')
drrvalueFlash = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 4, 3, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("ok", 1), ("undetermined", 2), ("warning", 3), ("minor", 4), ("major", 5), ("critical", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrvalueFlash.setStatus('mandatory')
if mibBuilder.loadTexts: drrvalueFlash.setDescription('The value of this object provides the current state of the parameter (1-Ok, 2-Undetermined, 3-Warning, 4-Minor, 5-Major, 6-Critical).')
drrstateflagFlash = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 4, 3, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("hidden", 1), ("read-only", 2), ("updateable", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrstateflagFlash.setStatus('mandatory')
if mibBuilder.loadTexts: drrstateflagFlash.setDescription('The value of this object provides the state of the parameter. (1-Hidden, 2-Read-Only, 3-Updateable).')
drrlabelFactoryDataCRC = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 4, 3, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrlabelFactoryDataCRC.setStatus('optional')
if mibBuilder.loadTexts: drrlabelFactoryDataCRC.setDescription('The value of this object provides the label of the Factory Data CRC Status parameter.')
drrvalueFactoryDataCRC = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 4, 3, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("ok", 1), ("undetermined", 2), ("warning", 3), ("minor", 4), ("major", 5), ("critical", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrvalueFactoryDataCRC.setStatus('mandatory')
if mibBuilder.loadTexts: drrvalueFactoryDataCRC.setDescription('The value of this object provides the current state of the parameter (1-Ok, 2-Undetermined, 3-Warning, 4-Minor, 5-Major, 6-Critical).')
drrstateflagFactoryDataCRC = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 4, 3, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("hidden", 1), ("read-only", 2), ("updateable", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrstateflagFactoryDataCRC.setStatus('mandatory')
if mibBuilder.loadTexts: drrstateflagFactoryDataCRC.setDescription('The value of this object provides the state of the parameter. (1-Hidden, 2-Read-Only, 3-Updateable).')
drrlabelAlarmDataCrc = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 4, 3, 11), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrlabelAlarmDataCrc.setStatus('optional')
if mibBuilder.loadTexts: drrlabelAlarmDataCrc.setDescription('The value of this object provides the label of the Alarm Data Crc parameter.')
drrvalueAlarmDataCrc = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 4, 3, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("ok", 1), ("undetermined", 2), ("warning", 3), ("minor", 4), ("major", 5), ("critical", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrvalueAlarmDataCrc.setStatus('mandatory')
if mibBuilder.loadTexts: drrvalueAlarmDataCrc.setDescription('The value of this object provides the current state of the parameter (1-Ok, 2-Undetermined, 3-Warning, 4-Minor, 5-Major, 6-Critical).')
drrstateflagAlarmDataCrc = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 4, 3, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("hidden", 1), ("read-only", 2), ("updateable", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrstateflagAlarmDataCrc.setStatus('mandatory')
if mibBuilder.loadTexts: drrstateflagAlarmDataCrc.setDescription('The value of this object provides the state of the parameter. (1-Hidden, 2-Read-Only, 3-Updateable).')
drrlabelHardwareStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 4, 3, 14), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrlabelHardwareStatus.setStatus('optional')
if mibBuilder.loadTexts: drrlabelHardwareStatus.setDescription('The value of this object provides the label of the Module Hardware Status parameter.')
drrvalueHardwareStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 4, 3, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("ok", 1), ("undetermined", 2), ("warning", 3), ("minor", 4), ("major", 5), ("critical", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrvalueHardwareStatus.setStatus('mandatory')
if mibBuilder.loadTexts: drrvalueHardwareStatus.setDescription('The value of this object provides the current state of the parameter (1-Ok, 2-Undetermined, 3-Warning, 4-Minor, 5-Major, 6-Critical).')
drrstateflagHardwareStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 4, 3, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("hidden", 1), ("read-only", 2), ("updateable", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrstateflagHardwareStatus.setStatus('mandatory')
if mibBuilder.loadTexts: drrstateflagHardwareStatus.setDescription('The value of this object provides the state of the parameter. (1-Hidden, 2-Read-Only, 3-Updateable).')
drrlabelOpticTripPointStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 4, 3, 17), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrlabelOpticTripPointStatus.setStatus('optional')
if mibBuilder.loadTexts: drrlabelOpticTripPointStatus.setDescription('The value of this object provides the label of the Optical Trip Point Status parameter. It indicates whether the optical power is above (0) or below (1) the trip point value.')
drrvalueOpticTripPointStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 4, 3, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("ok", 1), ("undetermined", 2), ("warning", 3), ("minor", 4), ("major", 5), ("critical", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrvalueOpticTripPointStatus.setStatus('mandatory')
if mibBuilder.loadTexts: drrvalueOpticTripPointStatus.setDescription('The value of this object provides the current state of the parameter (1-Ok, 2-Undetermined, 3-Warning, 4-Minor, 5-Major, 6-Critical).')
drrstateflagOpticTripPointStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 4, 3, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("hidden", 1), ("read-only", 2), ("updateable", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrstateflagOpticTripPointStatus.setStatus('mandatory')
if mibBuilder.loadTexts: drrstateflagOpticTripPointStatus.setDescription('The value of this object provides the state of the parameter. (1-Hidden, 2-Read-Only, 3-Updateable).')
drrlabelLinkStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 4, 3, 20), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrlabelLinkStatus.setStatus('optional')
if mibBuilder.loadTexts: drrlabelLinkStatus.setDescription('The value of this object provides the label of the Link Activity Status parameter. It indicates that the receiver is out of sync with the transmitter or that the demux cannot detect the optical signal.')
drrvalueLinkStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 4, 3, 21), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("ok", 1), ("undetermined", 2), ("warning", 3), ("minor", 4), ("major", 5), ("critical", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrvalueLinkStatus.setStatus('mandatory')
if mibBuilder.loadTexts: drrvalueLinkStatus.setDescription('The value of this object provides the current state of the parameter (1-Ok, 2-Undetermined, 3-Warning, 4-Minor, 5-Major, 6-Critical).')
drrstateflagLinkStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 4, 3, 22), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("hidden", 1), ("read-only", 2), ("updateable", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrstateflagLinkStatus.setStatus('mandatory')
if mibBuilder.loadTexts: drrstateflagLinkStatus.setDescription('The value of this object provides the state of the parameter. (1-Hidden, 2-Read-Only, 3-Updateable).')
gx2drr3xFactoryTableIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 5, 4, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: gx2drr3xFactoryTableIndex.setStatus('mandatory')
if mibBuilder.loadTexts: gx2drr3xFactoryTableIndex.setDescription('The value of this object identifies the network element. This index is equal to the hfcCommonTableIndex for the same element.')
drrbootControlByte = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 5, 4, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrbootControlByte.setStatus('mandatory')
if mibBuilder.loadTexts: drrbootControlByte.setDescription('The value of this object indicates which bank the firmware is currently being boot from.')
drrbootStatusByte = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 5, 4, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrbootStatusByte.setStatus('mandatory')
if mibBuilder.loadTexts: drrbootStatusByte.setDescription('This object indicates the status of the last boot')
drrbank1CRC = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 5, 4, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrbank1CRC.setStatus('mandatory')
if mibBuilder.loadTexts: drrbank1CRC.setDescription('This object provides the CRC code of bank 0.')
drrbank2CRC = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 5, 4, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrbank2CRC.setStatus('mandatory')
if mibBuilder.loadTexts: drrbank2CRC.setDescription('This object provides the CRC code of bank 1.')
drrprgEEPROMByte = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 5, 4, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrprgEEPROMByte.setStatus('mandatory')
if mibBuilder.loadTexts: drrprgEEPROMByte.setDescription('This object indicates if the EEPROM has been programmed')
drrfactoryCRC = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 5, 4, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrfactoryCRC.setStatus('mandatory')
if mibBuilder.loadTexts: drrfactoryCRC.setDescription('This object provides the CRC code for the Factory data.')
drrcalculateCRC = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 5, 4, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("factory", 1), ("alarm", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrcalculateCRC.setStatus('mandatory')
if mibBuilder.loadTexts: drrcalculateCRC.setDescription('This object indicates which of the data areas (factory or alarm) will have its CRC calculated.')
drrhourMeter = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 5, 4, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrhourMeter.setStatus('mandatory')
if mibBuilder.loadTexts: drrhourMeter.setDescription('This object provides the hour meter reading of the module.')
drrflashPrgCntA = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 5, 4, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrflashPrgCntA.setStatus('mandatory')
if mibBuilder.loadTexts: drrflashPrgCntA.setDescription('This object provides the number of times Bank 0 flash has been programmed.')
drrflashPrgCntB = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 5, 4, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrflashPrgCntB.setStatus('mandatory')
if mibBuilder.loadTexts: drrflashPrgCntB.setDescription('This object provides the number of times Bank 1 flash has been programmed.')
drrflashBankARev = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 5, 4, 12), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrflashBankARev.setStatus('mandatory')
if mibBuilder.loadTexts: drrflashBankARev.setDescription('This object provides the revision of flash bank 0.')
drrflashBankBRev = MibTableColumn((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13, 5, 4, 13), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: drrflashBankBRev.setStatus('mandatory')
if mibBuilder.loadTexts: drrflashBankBRev.setDescription('This object provides the revision of flash bank 1.')
trapdrrConfigChangeInteger = NotificationType((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13) + (0,1)).setObjects(("NLSBBN-TRAPS-MIB", "trapIdentifier"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemModelNumber"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemSerialNum"), ("NLSBBN-TRAPS-MIB", "trapPerceivedSeverity"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemOperState"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAlarmStatus"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAdminState"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAvailStatus"), ("NLSBBN-TRAPS-MIB", "trapText"), ("NLSBBN-TRAPS-MIB", "trapChangedObjectId"), ("NLSBBN-TRAPS-MIB", "trapChangedValueInteger"), ("NLSBBN-TRAPS-MIB", "trapNETrapLastTrapTimeStamp"))
if mibBuilder.loadTexts: trapdrrConfigChangeInteger.setDescription("This trap is issued if configuration of a single variable with integer type was changed (via ANY interface). TrapChangedValueInteger variable may contain the current reading of that variable. trapPerceivedSeverity - 'indeterminate'")
trapdrrConfigChangeDisplayString = NotificationType((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13) + (0,2)).setObjects(("NLSBBN-TRAPS-MIB", "trapIdentifier"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemModelNumber"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemSerialNum"), ("NLSBBN-TRAPS-MIB", "trapPerceivedSeverity"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemOperState"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAlarmStatus"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAdminState"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAvailStatus"), ("NLSBBN-TRAPS-MIB", "trapText"), ("NLSBBN-TRAPS-MIB", "trapChangedObjectId"), ("NLSBBN-TRAPS-MIB", "trapChangedValueDisplayString"), ("NLSBBN-TRAPS-MIB", "trapNETrapLastTrapTimeStamp"))
if mibBuilder.loadTexts: trapdrrConfigChangeDisplayString.setDescription("This trap is issued if configuration of a single variable with DisplayString type was changed (via ANY interface). TrapChangedValueDisplayString variable may contain the current reading of that variable. trapPerceivedSeverity - 'indeterminate'")
trapdrr12VCurrentAlarm = NotificationType((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13) + (0,3)).setObjects(("NLSBBN-TRAPS-MIB", "trapIdentifier"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemModelNumber"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemSerialNum"), ("NLSBBN-TRAPS-MIB", "trapPerceivedSeverity"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemOperState"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAlarmStatus"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAdminState"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAvailStatus"), ("NLSBBN-TRAPS-MIB", "trapText"), ("NLSBBN-TRAPS-MIB", "trapChangedObjectId"), ("NLSBBN-TRAPS-MIB", "trapChangedValueInteger"), ("NLSBBN-TRAPS-MIB", "trapNETrapLastTrapTimeStamp"))
if mibBuilder.loadTexts: trapdrr12VCurrentAlarm.setDescription('This trap is issued when the 12V power supply Current parameter goes out of range. trapAdditionalInfoInteger variable contains the current reading of this parameter.')
trapdrrModuleTempAlarm = NotificationType((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13) + (0,4)).setObjects(("NLSBBN-TRAPS-MIB", "trapIdentifier"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemModelNumber"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemSerialNum"), ("NLSBBN-TRAPS-MIB", "trapPerceivedSeverity"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemOperState"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAlarmStatus"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAdminState"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAvailStatus"), ("NLSBBN-TRAPS-MIB", "trapText"), ("NLSBBN-TRAPS-MIB", "trapChangedObjectId"), ("NLSBBN-TRAPS-MIB", "trapChangedValueInteger"), ("NLSBBN-TRAPS-MIB", "trapNETrapLastTrapTimeStamp"))
if mibBuilder.loadTexts: trapdrrModuleTempAlarm.setDescription('This trap is issued when the Internal Module Temperature goes out of range. trapAdditionalInfoInteger variable contains the current reading of this parameter.')
trapdrrFanCurrentAlarm = NotificationType((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13) + (0,5)).setObjects(("NLSBBN-TRAPS-MIB", "trapIdentifier"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemModelNumber"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemSerialNum"), ("NLSBBN-TRAPS-MIB", "trapPerceivedSeverity"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemOperState"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAlarmStatus"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAdminState"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAvailStatus"), ("NLSBBN-TRAPS-MIB", "trapText"), ("NLSBBN-TRAPS-MIB", "trapChangedObjectId"), ("NLSBBN-TRAPS-MIB", "trapChangedValueInteger"), ("NLSBBN-TRAPS-MIB", "trapNETrapLastTrapTimeStamp"))
if mibBuilder.loadTexts: trapdrrFanCurrentAlarm.setDescription('This trap is issued when the Fan Current parameter goes out of range. trapAdditionalInfoInteger variable contains the current reading of this parameter.')
trapdrrFlashAlarm = NotificationType((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13) + (0,6)).setObjects(("NLSBBN-TRAPS-MIB", "trapIdentifier"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemModelNumber"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemSerialNum"), ("NLSBBN-TRAPS-MIB", "trapPerceivedSeverity"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemOperState"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAlarmStatus"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAdminState"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAvailStatus"), ("NLSBBN-TRAPS-MIB", "trapText"), ("NLSBBN-TRAPS-MIB", "trapChangedObjectId"), ("NLSBBN-TRAPS-MIB", "trapChangedValueInteger"), ("NLSBBN-TRAPS-MIB", "trapNETrapLastTrapTimeStamp"))
if mibBuilder.loadTexts: trapdrrFlashAlarm.setDescription('This trap is issued when the module detects an error during Flash memory operations.')
trapdrrBankBootAlarm = NotificationType((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13) + (0,7)).setObjects(("NLSBBN-TRAPS-MIB", "trapIdentifier"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemModelNumber"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemSerialNum"), ("NLSBBN-TRAPS-MIB", "trapPerceivedSeverity"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemOperState"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAlarmStatus"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAdminState"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAvailStatus"), ("NLSBBN-TRAPS-MIB", "trapText"), ("NLSBBN-TRAPS-MIB", "trapChangedObjectId"), ("NLSBBN-TRAPS-MIB", "trapChangedValueInteger"), ("NLSBBN-TRAPS-MIB", "trapNETrapLastTrapTimeStamp"))
if mibBuilder.loadTexts: trapdrrBankBootAlarm.setDescription('This trap is issued when the module detects an error while booting from bank 0 or bank 1.')
trapdrrAlarmDataCRCAlarm = NotificationType((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13) + (0,8)).setObjects(("NLSBBN-TRAPS-MIB", "trapIdentifier"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemModelNumber"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemSerialNum"), ("NLSBBN-TRAPS-MIB", "trapPerceivedSeverity"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemOperState"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAlarmStatus"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAdminState"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAvailStatus"), ("NLSBBN-TRAPS-MIB", "trapText"), ("NLSBBN-TRAPS-MIB", "trapChangedObjectId"), ("NLSBBN-TRAPS-MIB", "trapChangedValueInteger"), ("NLSBBN-TRAPS-MIB", "trapNETrapLastTrapTimeStamp"))
if mibBuilder.loadTexts: trapdrrAlarmDataCRCAlarm.setDescription('This trap is issued when the Alarm Data CRC is incorrect.')
trapdrrHardwareErrAlarm = NotificationType((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13) + (0,9)).setObjects(("NLSBBN-TRAPS-MIB", "trapIdentifier"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemModelNumber"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemSerialNum"), ("NLSBBN-TRAPS-MIB", "trapPerceivedSeverity"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemOperState"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAlarmStatus"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAdminState"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAvailStatus"), ("NLSBBN-TRAPS-MIB", "trapText"), ("NLSBBN-TRAPS-MIB", "trapChangedObjectId"), ("NLSBBN-TRAPS-MIB", "trapChangedValueInteger"), ("NLSBBN-TRAPS-MIB", "trapNETrapLastTrapTimeStamp"))
if mibBuilder.loadTexts: trapdrrHardwareErrAlarm.setDescription('This trap is issued when hardware is abnormal.')
trapdrrOpticalSignalAlarm = NotificationType((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13) + (0,10)).setObjects(("NLSBBN-TRAPS-MIB", "trapIdentifier"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemModelNumber"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemSerialNum"), ("NLSBBN-TRAPS-MIB", "trapPerceivedSeverity"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemOperState"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAlarmStatus"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAdminState"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAvailStatus"), ("NLSBBN-TRAPS-MIB", "trapText"), ("NLSBBN-TRAPS-MIB", "trapChangedObjectId"), ("NLSBBN-TRAPS-MIB", "trapChangedValueInteger"), ("NLSBBN-TRAPS-MIB", "trapNETrapLastTrapTimeStamp"))
if mibBuilder.loadTexts: trapdrrOpticalSignalAlarm.setDescription('This trap is issued when the optical signal is outside the reasonable range.')
trapdrrFactoryDataCRCAlarm = NotificationType((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13) + (0,11)).setObjects(("NLSBBN-TRAPS-MIB", "trapIdentifier"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemModelNumber"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemSerialNum"), ("NLSBBN-TRAPS-MIB", "trapPerceivedSeverity"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemOperState"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAlarmStatus"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAdminState"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAvailStatus"), ("NLSBBN-TRAPS-MIB", "trapText"), ("NLSBBN-TRAPS-MIB", "trapChangedObjectId"), ("NLSBBN-TRAPS-MIB", "trapChangedValueInteger"), ("NLSBBN-TRAPS-MIB", "trapNETrapLastTrapTimeStamp"))
if mibBuilder.loadTexts: trapdrrFactoryDataCRCAlarm.setDescription('This trap is issued when the Factory Data CRC is incorrect.')
trapdrrResetFactoryDefaultAlarm = NotificationType((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13) + (0,12)).setObjects(("NLSBBN-TRAPS-MIB", "trapIdentifier"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemModelNumber"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemSerialNum"), ("NLSBBN-TRAPS-MIB", "trapPerceivedSeverity"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemOperState"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAlarmStatus"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAdminState"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAvailStatus"), ("NLSBBN-TRAPS-MIB", "trapText"), ("NLSBBN-TRAPS-MIB", "trapChangedObjectId"), ("NLSBBN-TRAPS-MIB", "trapChangedValueInteger"), ("NLSBBN-TRAPS-MIB", "trapNETrapLastTrapTimeStamp"))
if mibBuilder.loadTexts: trapdrrResetFactoryDefaultAlarm.setDescription('This trap is issued when the Factory Default Reset occurs.')
trapdrrTripPointAlarm = NotificationType((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13) + (0,13)).setObjects(("NLSBBN-TRAPS-MIB", "trapIdentifier"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemModelNumber"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemSerialNum"), ("NLSBBN-TRAPS-MIB", "trapPerceivedSeverity"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemOperState"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAlarmStatus"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAdminState"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAvailStatus"), ("NLSBBN-TRAPS-MIB", "trapText"), ("NLSBBN-TRAPS-MIB", "trapChangedObjectId"), ("NLSBBN-TRAPS-MIB", "trapChangedValueInteger"), ("NLSBBN-TRAPS-MIB", "trapNETrapLastTrapTimeStamp"))
if mibBuilder.loadTexts: trapdrrTripPointAlarm.setDescription('This trap is issued when the input optical power is lower than the customer-set trip point.')
trapdrrLinkAlarm = NotificationType((1, 3, 6, 1, 4, 1, 1166, 6, 1, 2, 13) + (0,14)).setObjects(("NLSBBN-TRAPS-MIB", "trapIdentifier"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemModelNumber"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemSerialNum"), ("NLSBBN-TRAPS-MIB", "trapPerceivedSeverity"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemOperState"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAlarmStatus"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAdminState"), ("NLSBBN-TRAPS-MIB", "trapNetworkElemAvailStatus"), ("NLSBBN-TRAPS-MIB", "trapText"), ("NLSBBN-TRAPS-MIB", "trapChangedObjectId"), ("NLSBBN-TRAPS-MIB", "trapChangedValueInteger"), ("NLSBBN-TRAPS-MIB", "trapNETrapLastTrapTimeStamp"))
if mibBuilder.loadTexts: trapdrrLinkAlarm.setDescription('This trap is issued when the receiver is out of sync with the transmitter.')
mibBuilder.exportSymbols("OMNI-gx2drr3x-MIB", drrstateflagFlash=drrstateflagFlash, drrminValueTrippointLevel=drrminValueTrippointLevel, drrstateFlagRFAAttenuation=drrstateFlagRFAAttenuation, drrminValueRFBAttenuation=drrminValueRFBAttenuation, drrmaxValueFanCurrent=drrmaxValueFanCurrent, drrlabelModTemp=drrlabelModTemp, drrbootStatusByte=drrbootStatusByte, trapdrrFlashAlarm=trapdrrFlashAlarm, drrminorHighOptCurrent=drrminorHighOptCurrent, gx2drr3xFactoryEntry=gx2drr3xFactoryEntry, drrlabelFactoryDefaultReset=drrlabelFactoryDefaultReset, drrvalueTrippointMode=drrvalueTrippointMode, drrmaxValueRFAAttenuation=drrmaxValueRFAAttenuation, drrflashPrgCntB=drrflashPrgCntB, drrminValueRFCAttenuation=drrminValueRFCAttenuation, drruom12VCurrent=drruom12VCurrent, drrstateFlagRFCAttenuation=drrstateFlagRFCAttenuation, gx2drr3xStatusEntry=gx2drr3xStatusEntry, drrmaxValue12VCurrent=drrmaxValue12VCurrent, drralarmStateOptCurrent=drralarmStateOptCurrent, drrlabelHardwareStatus=drrlabelHardwareStatus, drrlabelOptCurrent=drrlabelOptCurrent, drrminorLowTrippointLevel=drrminorLowTrippointLevel, gx2drr3xStatusTableIndex=gx2drr3xStatusTableIndex, drrstateflagOpticTripPointStatus=drrstateflagOpticTripPointStatus, drrfactoryCRC=drrfactoryCRC, drralarmStateRFBAttenuation=drralarmStateRFBAttenuation, drrminorLowModTemp=drrminorLowModTemp, trapdrr12VCurrentAlarm=trapdrr12VCurrentAlarm, drrlabelRFAAttenuation=drrlabelRFAAttenuation, drrmajorHighRFBAttenuation=drrmajorHighRFBAttenuation, drrmajorHighTrippointLevel=drrmajorHighTrippointLevel, drrlabelRFCAttenuation=drrlabelRFCAttenuation, drrmajorHighRFAAttenuation=drrmajorHighRFAAttenuation, drrstateFlagTrippointLevel=drrstateFlagTrippointLevel, drrcalculateCRC=drrcalculateCRC, drrcurrentValueRFAAttenuation=drrcurrentValueRFAAttenuation, drrmaxValueOptCurrent=drrmaxValueOptCurrent, drrcurrentValueRFCAttenuation=drrcurrentValueRFCAttenuation, drrminorHighModTemp=drrminorHighModTemp, drralarmState12VCurrent=drralarmState12VCurrent, drrstateFlagRFBAttenuation=drrstateFlagRFBAttenuation, drrbootControlByte=drrbootControlByte, drrminValueFanCurrent=drrminValueFanCurrent, drruomTrippointLevel=drruomTrippointLevel, gx2drr3xAnalogEntry=gx2drr3xAnalogEntry, drrmaxValueTrippointLevel=drrmaxValueTrippointLevel, drrminorHighFanCurrent=drrminorHighFanCurrent, drrcurrentValueOptCurrent=drrcurrentValueOptCurrent, drrlabelFlash=drrlabelFlash, drrlabel12VCurrent=drrlabel12VCurrent, drrstateflagFactoryDataCRC=drrstateflagFactoryDataCRC, drrcurrentValueRFBAttenuation=drrcurrentValueRFBAttenuation, drrprgEEPROMByte=drrprgEEPROMByte, drrflashPrgCntA=drrflashPrgCntA, drrmaxValueRFCAttenuation=drrmaxValueRFCAttenuation, drrminorLowFanCurrent=drrminorLowFanCurrent, drrlabelBoot=drrlabelBoot, drrmaxValueRFBAttenuation=drrmaxValueRFBAttenuation, drrstateFlag12VCurrent=drrstateFlag12VCurrent, drrvalueLinkStatus=drrvalueLinkStatus, drralarmStateRFCAttenuation=drralarmStateRFCAttenuation, drrlabelOpticTripPointStatus=drrlabelOpticTripPointStatus, drrmajorHigh12VCurrent=drrmajorHigh12VCurrent, drrstateflagBoot=drrstateflagBoot, drrminorHigh12VCurrent=drrminorHigh12VCurrent, gx2drr3xDigitalTableIndex=gx2drr3xDigitalTableIndex, drrvalueFrontPanelTest=drrvalueFrontPanelTest, trapdrrBankBootAlarm=trapdrrBankBootAlarm, drrminorHighRFCAttenuation=drrminorHighRFCAttenuation, drrlabelFrontPanelTest=drrlabelFrontPanelTest, drrmajorLowOptCurrent=drrmajorLowOptCurrent, gx2drr3xStatusTable=gx2drr3xStatusTable, drrvalueAlarmDataCrc=drrvalueAlarmDataCrc, trapdrrHardwareErrAlarm=trapdrrHardwareErrAlarm, 
drrminorLowRFAAttenuation=drrminorLowRFAAttenuation, drrstateflagHardwareStatus=drrstateflagHardwareStatus, drrvalueHardwareStatus=drrvalueHardwareStatus, drrstateFlagOptCurrent=drrstateFlagOptCurrent, trapdrrConfigChangeInteger=trapdrrConfigChangeInteger, drrmajorHighModTemp=drrmajorHighModTemp, drrstateFlagModTemp=drrstateFlagModTemp, drrbank2CRC=drrbank2CRC, drrflashBankARev=drrflashBankARev, drrmaxValueModTemp=drrmaxValueModTemp, trapdrrModuleTempAlarm=trapdrrModuleTempAlarm, drrcurrentValueFanCurrent=drrcurrentValueFanCurrent, drrstateflagFrontPanelTest=drrstateflagFrontPanelTest, drruomRFBAttenuation=drruomRFBAttenuation, drrenumFrontPanelTest=drrenumFrontPanelTest, drrmajorHighOptCurrent=drrmajorHighOptCurrent, gx2drr3xFactoryTableIndex=gx2drr3xFactoryTableIndex, drrstateflagFactoryDefaultReset=drrstateflagFactoryDefaultReset, drrvalueOpticTripPointStatus=drrvalueOpticTripPointStatus, trapdrrOpticalSignalAlarm=trapdrrOpticalSignalAlarm, drruomOptCurrent=drruomOptCurrent, drrmajorLowModTemp=drrmajorLowModTemp, drrcurrentValueModTemp=drrcurrentValueModTemp, drrflashBankBRev=drrflashBankBRev, drrminValueRFAAttenuation=drrminValueRFAAttenuation, drrmajorLowRFCAttenuation=drrmajorLowRFCAttenuation, drrmajorLowRFBAttenuation=drrmajorLowRFBAttenuation, drrmajorLowFanCurrent=drrmajorLowFanCurrent, drrminorLowRFCAttenuation=drrminorLowRFCAttenuation, drruomModTemp=drruomModTemp, gx2drr3xFactoryTable=gx2drr3xFactoryTable, drrlabelFanCurrent=drrlabelFanCurrent, drrlabelTrippointLevel=drrlabelTrippointLevel, gx2drr3xAnalogTable=gx2drr3xAnalogTable, drrminValueOptCurrent=drrminValueOptCurrent, drralarmStateRFAAttenuation=drralarmStateRFAAttenuation, drrminorHighRFAAttenuation=drrminorHighRFAAttenuation, drrminorHighTrippointLevel=drrminorHighTrippointLevel, drrstateflagTrippointMode=drrstateflagTrippointMode, gx2drr3xDescriptor=gx2drr3xDescriptor, drruomFanCurrent=drruomFanCurrent, gx2drr3xDigitalTable=gx2drr3xDigitalTable, drrminValue12VCurrent=drrminValue12VCurrent, drralarmStateTrippointLevel=drralarmStateTrippointLevel, trapdrrFactoryDataCRCAlarm=trapdrrFactoryDataCRCAlarm, drruomRFAAttenuation=drruomRFAAttenuation, trapdrrAlarmDataCRCAlarm=trapdrrAlarmDataCRCAlarm, drrstateFlagFanCurrent=drrstateFlagFanCurrent, drrlabelFactoryDataCRC=drrlabelFactoryDataCRC, drrcurrentValue12VCurrent=drrcurrentValue12VCurrent, drrvalueBoot=drrvalueBoot, drrminorLow12VCurrent=drrminorLow12VCurrent, drrcurrentValueTrippointLevel=drrcurrentValueTrippointLevel, drrstateflagAlarmDataCrc=drrstateflagAlarmDataCrc, drrminorHighRFBAttenuation=drrminorHighRFBAttenuation, drrmajorHighRFCAttenuation=drrmajorHighRFCAttenuation, drrbank1CRC=drrbank1CRC, drrminValueModTemp=drrminValueModTemp, gx2drr3xDigitalEntry=gx2drr3xDigitalEntry, drrstateflagLinkStatus=drrstateflagLinkStatus, drrhourMeter=drrhourMeter, drrmajorLowTrippointLevel=drrmajorLowTrippointLevel, trapdrrResetFactoryDefaultAlarm=trapdrrResetFactoryDefaultAlarm, trapdrrConfigChangeDisplayString=trapdrrConfigChangeDisplayString, drrminorLowOptCurrent=drrminorLowOptCurrent, gx2drr3xAnalogTableIndex=gx2drr3xAnalogTableIndex, drrminorLowRFBAttenuation=drrminorLowRFBAttenuation, drrenumFactoryDefaultReset=drrenumFactoryDefaultReset, drrmajorLowRFAAttenuation=drrmajorLowRFAAttenuation, drrmajorHighFanCurrent=drrmajorHighFanCurrent, trapdrrTripPointAlarm=trapdrrTripPointAlarm, drralarmStateFanCurrent=drralarmStateFanCurrent, trapdrrFanCurrentAlarm=trapdrrFanCurrentAlarm, drrvalueFactoryDefaultReset=drrvalueFactoryDefaultReset, 
drrlabelRFBAttenuation=drrlabelRFBAttenuation, drruomRFCAttenuation=drruomRFCAttenuation, drrenumTrippointMode=drrenumTrippointMode, drralarmStateModTemp=drralarmStateModTemp, drrmajorLow12VCurrent=drrmajorLow12VCurrent, drrlabelAlarmDataCrc=drrlabelAlarmDataCrc, drrvalueFlash=drrvalueFlash, Float=Float, drrlabelLinkStatus=drrlabelLinkStatus, trapdrrLinkAlarm=trapdrrLinkAlarm, drrlabelTrippointMode=drrlabelTrippointMode, drrvalueFactoryDataCRC=drrvalueFactoryDataCRC)
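# --- Hedged usage sketch (not part of the generated MIB module above) ---
# Generated pysnmp MIB modules like this one are usually loaded through
# pysnmp's MIB builder rather than imported directly. A minimal sketch,
# assuming this module is available on the MIB search path under the name
# 'OMNI-gx2drr3x-MIB':
#
#   from pysnmp.smi import builder, view
#   mib_builder = builder.MibBuilder()
#   mib_builder.loadModules('OMNI-gx2drr3x-MIB')
#   mib_view = view.MibViewController(mib_builder)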
|
py | b411b767937bd36b622fdb0e48800c26a43a2588 | """
Datapane Reports Object
Describes the `Report` object and included APIs for saving and publishing them.
"""
import shutil
import typing as t
import uuid
import warnings
import webbrowser
from enum import Enum, IntEnum
from functools import reduce
from os import path as osp
from pathlib import Path
import importlib_resources as ir
from glom import glom
from jinja2 import Environment, FileSystemLoader, Markup, Template, contextfunction
from lxml import etree
from lxml.etree import Element
from datapane import __version__
from datapane.client.api.common import DPTmpFile, Resource, do_download_file
from datapane.client.api.dp_object import DPObjectRef
from datapane.client.api.runtime import _report
from datapane.common import log, timestamp
from datapane.common.report import local_report_def, validate_report_doc
from .blocks import BlockOrPrimitive, BuilderState, E, Group, Page, PageOrPrimitive, Select
local_post_xslt = etree.parse(str(local_report_def / "local_post_process.xslt"))
local_post_transform = etree.XSLT(local_post_xslt)
# only these types will be documented by default
__all__ = ["Report", "Visibility"]
__pdoc__ = {
"Report.endpoint": False,
}
@contextfunction
def include_raw(ctx, name):
""" Normal jinja2 {% include %} doesn't escape {{...}} which appear in React's source code """
env = ctx.environment
return Markup(env.loader.get_source(env, name)[0])
def is_jupyter() -> bool:
"""Checks if inside ipython shell inside browser"""
try:
return get_ipython().__class__.__name__ == "ZMQInteractiveShell" # noqa: F821
except Exception:
return False
class Visibility(IntEnum):
"""The report visibility type"""
PRIVATE = 0 # private to owner
ORG = 1 # visible to all users in the org
PUBLIC = 2 # anon/unauthed access
class ReportType(Enum):
"""The report type"""
DASHBOARD = "dashboard"
REPORT = "report"
ARTICLE = "article"
class ReportFileWriter:
""" Collects data needed to display a local report, and generates the local HTML """
template: t.Optional[Template] = None
assets: Path
asset_url = "https://datapane.com/static"
asset_js = "local-report-base.css"
asset_css = "local-report-base.js"
def _setup_template(self) -> Template:
""" Jinja template setup for local rendering """
# check we have the files, download if not
self.assets = ir.files("datapane.resources.local_report")
if not (self.assets / self.asset_js).exists():
log.warning("Can't find report assets, downloading")
do_download_file(f"{self.asset_url}/{self.asset_js}", self.assets / self.asset_js)
do_download_file(f"{self.asset_url}/{self.asset_css}", self.assets / self.asset_css)
template_loader = FileSystemLoader(self.assets)
template_env = Environment(loader=template_loader)
template_env.globals["include_raw"] = include_raw
self.template = template_env.get_template("template.html")
def write(self, report_doc: str, path: str, report_type: ReportType, standalone: bool):
# create template on demand
if not self.template:
self._setup_template()
# template.html inlines the report doc with backticks so we need to escape any inside the doc
report_doc_esc = report_doc.replace("`", r"\`")
r = self.template.render(
report_doc=report_doc_esc,
report_type=report_type,
standalone=standalone,
cdn_base=f"https://storage.googleapis.com/datapane-public/report-assets/{__version__}",
)
Path(path).write_text(r, encoding="utf-8")
################################################################################
# Report DPObject
class Report(DPObjectRef):
"""
Reports collate plots, text, tables, and files into an interactive report that
can be analysed and shared by users in their Browser
"""
endpoint: str = "/reports/"
pages: t.List[Page]
_last_saved: t.Optional[str] = None # Path to local report
_tmp_report: t.Optional[Path] = None # Temp local report
_local_writer = ReportFileWriter()
report_type: ReportType = ReportType.REPORT
list_fields: t.List[str] = ["name", "web_url", "versions"]
"""When set, the report is full-width suitable for use in a dashboard"""
def __init__(
self,
*arg_blocks: PageOrPrimitive,
blocks: t.Optional[t.List[PageOrPrimitive]] = None,
type: ReportType = ReportType.REPORT,
**kwargs,
):
"""
Args:
*arg_blocks: Group to add to report
blocks: Allows providing the report blocks as a single list
type: Set the Report type, this will affect the formatting and layout of the report
Returns:
A `Report` object that can be published, saved, etc.
.. tip:: Group can be passed using either arg parameters or the `blocks` kwarg, e.g.
`dp.Report(plot, table)` or `dp.Report(blocks=[plot, table])`
"""
super().__init__(**kwargs)
self.report_type = type
self._preprocess_pages(blocks or list(arg_blocks))
def _preprocess_pages(self, pages: t.List[BlockOrPrimitive]):
# pre-process report blocks
if all(isinstance(b, Page) for b in pages):
# we have all pages
self.pages = t.cast(t.List[Page], pages)
elif all(isinstance(b, (Group, Select)) for b in pages):
# all blocks - wrap as a single page
self.pages = [Page(blocks=pages)]
else:
# add additional top-level Group element to group mixed elements
self.pages = [Page(blocks=[Group(blocks=pages)])]
def _gen_report(self, embedded: bool, title: str, description: str) -> t.Tuple[str, t.List[Path]]:
"""Build XML report document"""
# convert Pages to XML
s = BuilderState(embedded)
_s = reduce(lambda _s, p: p._to_xml(_s), self.pages, s)
# add main structure and Meta
report_doc: Element = E.Report(
E.Meta(
E.Author("Anonymous"), # TODO - get username from config?
E.CreatedOn(timestamp()),
E.Title(title),
E.Description(description),
),
E.Main(*_s.elements, type=self.report_type.value),
version="1",
)
report_doc.set("{http://www.w3.org/XML/1998/namespace}id", f"_{uuid.uuid4().hex}")
# post_process and validate
processed_report_doc = local_post_transform(report_doc, embedded="true()" if embedded else "false()")
validate_report_doc(xml_doc=processed_report_doc)
# convert to string
report_str = etree.tounicode(processed_report_doc, pretty_print=True)
log.debug("Built Report")
log.info(report_str)
return (report_str, _s.attachments)
def publish(
self,
name: str,
description: str = "",
source_url: str = "",
visibility: t.Optional[Visibility] = None,
open: bool = False,
tags: t.Optional[t.List[str]] = None,
**kwargs,
) -> None:
"""
Publish the report, including its attached assets, to the logged-in Datapane Server.
Args:
name: The report name - can include spaces, caps, symbols, etc., e.g. "Profit & Loss 2020"
description: A high-level description for the report, this is displayed in searches and thumbnails
source_url: A URL pointing to the source code for the report, e.g. a GitHub repo or a Colab notebook
visibility: one of `"PUBLIC"` _(default on Public)_, `"ORG"` _(Teams only)_, or `"PRIVATE"` _(limited on Public, unlimited on Teams)_
open: Open the file in your browser after creating
tags: A list of tags (as strings) used to categorise your report
"""
print("Publishing report and associated data - please wait..")
# process params
tags = tags or []
# TODO - remove deprecation
if isinstance(visibility, str):
visibility_str = visibility
warnings.warn("Passing visibility as a string is deprecated, use dp.Visibility enum instead.")
else:
visibility_str = glom(visibility, "name", default=None)
kwargs.update(name=name, description=description, tags=tags, source_url=source_url, visibility=visibility_str)
report_str, attachments = self._gen_report(embedded=False, title=name, description=description)
res = Resource(self.endpoint).post_files(dict(attachments=attachments), document=report_str, **kwargs)
# Set dto based on new URL
self.url = res.url
self.refresh()
# add report to internal API handler for use by_datapane
_report.append(self)
if open:
webbrowser.open_new_tab(self.web_url)
print(f"Report successfully published at {self.web_url}")
def save(self, path: str, open: bool = False, standalone: bool = False) -> None:
"""Save the report to a local HTML file
Args:
path: location to save the HTML file
open: Open the file in your browser after creating
standalone: Create a fully standalone HTML report with minimal external/network dependencies _(this can result in large files)_
"""
self._last_saved = path
local_doc, _ = self._gen_report(embedded=True, title="Local Report", description="Description")
self._local_writer.write(local_doc, path, self.report_type, standalone)
if open:
path_uri = f"file://{osp.realpath(osp.expanduser(path))}"
webbrowser.open_new_tab(path_uri)
def preview(self, width: int = 600, height: int = 500, standalone: bool = False) -> None:
"""
Preview the report inside your currently running Jupyter notebook
Args:
width: Width of the report preview in Jupyter (default: 600)
height: Height of the report preview in Jupyter (default: 500)
standalone: Create a fully standalone HTML report with minimal external/network dependencies _(this can result in large files)
"""
if is_jupyter():
from IPython.display import IFrame
# Remove the previous temp report if it's been generated
if self._tmp_report and self._tmp_report.exists():
self._tmp_report.unlink()
# We need to copy the report HTML to a local temp file,
# as most browsers block iframes to absolute local paths.
tmpfile = DPTmpFile(ext=".html")
if self._last_saved:
# Copy to tmp file if already saved
shutil.copy(self._last_saved, tmpfile.name)
else:
# Else save directly to tmp file
self.save(path=tmpfile.name, standalone=standalone)
self._tmp_report = tmpfile.file
# NOTE - iframe must be relative path
iframe_src = self._tmp_report.relative_to(Path(".").absolute())
return IFrame(src=str(iframe_src), width=width, height=height)
else:
log.warning("Can't preview - are you running in Jupyter?")
|
py | b411b79557c7c7bdd88f52280dcd9b14b3d35b8d | # -*- coding: utf-8 -*-
import numpy as np
import treecode.energy_and_momentum as EM
def is_gravity_field_weak(particles, C_2):
    # Function that returns an error string if the gravitational field becomes
    # too strong for the model in use to remain applicable
ERROR_NAME = ''
# Compute phi / c^2, where phi is the gravitational potential
array_phi = abs(particles[:, 10] / C_2)
# if |phi / c^2| exceeds a certain threshold, the gravitational
# field is considered strong, which is outside the range of
# applicability of the model used here
array_phi = array_phi >= 0.05
if array_phi.any():
ERROR_NAME = 'Strong gravity field error'
return ERROR_NAME
def speed_limit(particles, C_2):
# Function that returns an error string if the speed of a material
# point exceeds the speed of light
ERROR_NAME = ''
v = np.multiply(particles[:, 3:6], particles[:, 3:6])
v_2 = v.sum(axis=1) >= C_2
if v_2.any():
ERROR_NAME = 'FTL error'
return ERROR_NAME
def enegry_parameters(q, X):
# Count the number of particles that have flown out of the system
part_num = 0
while X[part_num, 11] < 0:
part_num += 1
if part_num == (np.size(X, 0)):
break
momentum = EM.momentum_of_system(X)
kinetic_energy = EM.system_kinetic_energy(X)
potential_energy = EM.system_potential_energy(X)
kinetic_in_volume = EM.kinetic_energy_in_volume(X, part_num)
potential_in_volume = EM.potential_energy_in_volume(X, part_num)
virial_coeff_system = - kinetic_energy / potential_energy
virial_coeff_selected_volume = - kinetic_in_volume / potential_in_volume
# momentum_in_volume = EM.momentum_of_system_in_volume(X, part_num)
# Write the system energy into a separate array:
# 1) step number
# 2) kinetic energy of the whole system
# 3) potential energy of the whole system
# 4) total energy of the whole system
# 5) maximum difference in kinetic energy
# between the current step and the previous one
# 6) maximum difference in potential energy
# between the current step and the previous one
# 7) momentum of the system along the X axis
# 8) momentum of the system along the Y axis
# 9) momentum of the system along the Z axis
# 10) kinetic energy of all particles in the volume
# 11) potential energy of all particles in the volume
# 12) total energy of all particles in the volume
# 13) virial coefficient of the whole system
# 14) virial coefficient of the selected volume
ENERGY = [q,
kinetic_energy,
potential_energy,
EM.system_energy_Newton(X),
EM.max_dT(X),
EM.max_dU(X),
momentum[0],
momentum[1],
momentum[2],
kinetic_in_volume,
potential_in_volume,
EM.system_energy_in_volume(X, part_num),
virial_coeff_system,
virial_coeff_selected_volume]
return ENERGY
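# Hypothetical usage sketch for the checks above. It assumes `particles` is the
# N x 12+ array used throughout this module (columns 3:6 hold velocities,
# column 10 the gravitational potential, column 11 the in/out-of-volume flag)
# and C_2 is the speed of light squared in simulation units.
#
# error = is_gravity_field_weak(particles, C_2) or speed_limit(particles, C_2)
# if error:
#     print(error)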
|
py | b411b7b8fbb1fad6510337a493a234888205a2ad | """Design chunks source code
"""
import json
import sys
def design_chunks(file_index_file, activity_index_file, save_path, num_videos_per_chunk=4):
""" Merge file_index with activity_index content into different chunks written in a file
Args:
file_index_file (str): Path to file index json file
activity_index_file (str): Path to activity index json file
save_path (str): Path to save chunks file
num_videos_per_chunks(int): number of videos in a chunk
"""
file_index = json.load(open(file_index_file, 'r'))
activity_index = json.load(open(activity_index_file, 'r'))
chunk_dict = {}
chunk_count = 0
chunk_prefix = "Chunk"
all_activities = list(activity_index.keys())
for index, file_name in enumerate(file_index.keys()):
if index % num_videos_per_chunk == 0:
# start a new chunk
chunk_count += 1
chunk_name = chunk_prefix + str(chunk_count)
chunk_dict[chunk_name] = {"activities": all_activities,
"files": []}
chunk_name = chunk_prefix + str(chunk_count)
chunk_dict[chunk_name]["files"].append(file_name)
with open(save_path, 'w') as f:
json.dump(chunk_dict, f, indent=2)
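# For illustration, with num_videos_per_chunk=2 and files f1..f3 the saved JSON
# would look roughly like (activity names depend on activity_index_file):
# {
#   "Chunk1": {"activities": [...], "files": ["f1", "f2"]},
#   "Chunk2": {"activities": [...], "files": ["f3"]}
# }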
if __name__ == '__main__':
file_index_file=sys.argv[1]
activity_index_file=sys.argv[2]
save_path=sys.argv[3] ## this is dir + name
num_videos_per_chunk=int(sys.argv[4])
design_chunks(file_index_file, activity_index_file, save_path, num_videos_per_chunk) |
py | b411ba29941386ba6cf3dd5f810ace52a3b0d0c3 | #!/usr/bin/env python
"""
Point Grey camera used in the context of a focus lock.
Hazen 09/19
Evan 06/21
"""
import numpy
import time
from PyQt5 import QtCore
import storm_control.sc_hardware.utility.af_lock_c as afLC
import storm_control.sc_hardware.pointGrey.spinnaker as spinnaker
import tifffile
# ===== Import fitting libraries. ===========
# Numpy fitter, this should always be available.
import storm_control.sc_hardware.utility.np_lock_peak_finder as npLPF
# Finding/fitting using the storm-analysis project.
saLPF = None
try:
import storm_control.sc_hardware.utility.sa_lock_peak_finder as saLPF
except ModuleNotFoundError as mnfe:
print(">> Warning! Storm analysis lock fitting module not found. <<")
print(mnfe)
pass
# Finding using the storm-analysis project, fitting using image correlation.
cl2DG = None
try:
import storm_control.sc_hardware.utility.corr_lock_c2dg as cl2DG
except ModuleNotFoundError as mnfe:
# Only need one warning about the lack of storm-analysis.
pass
except OSError as ose:
print(">> Warning! Correlation lock fitting C library not found. <<")
print(ose)
pass
#==============================
class LockCamera(QtCore.QThread):
"""
This class is used to control a Point Grey (Spinnaker) camera in the
context of a focus lock.
"""
cameraUpdate = QtCore.pyqtSignal(dict)
def __init__(self, camera_id = None, parameters = None, **kwds):
super().__init__(**kwds)
self.cur_offsetx = None
self.cur_offsety = None
self.old_offsetx = None
self.old_offsety = None
self.max_offsetx = None
self.max_offsety = None
self.n_analyzed = 0
self.n_dropped = 0
self.start_time = None
self.params_mutex = QtCore.QMutex()
self.running = False
self.zero_dist = parameters.get("zero_dist")
# Initialize library.
spinnaker.pySpinInitialize(verbose = False)
# Get the camera & set some defaults.
self.camera = spinnaker.getCamera(camera_id)
#print(f'self.camera = { self.camera}')
# Only Grasshopper has defect correction
if self.camera.hasProperty("VideoMode"):
self.camera.setProperty("VideoMode", parameters.get("video_mode"))
self.camera.setProperty("pgrDefectPixelCorrectionEnable", False)
# Set pixel format.
self.camera.setProperty("PixelFormat", "Mono16")
# We don't want any of these 'features'.
#self.camera.setProperty("AcquisitionFrameRateAuto", "Off")
self.camera.setProperty("AcquisitionMode", "Continuous")
self.camera.setProperty("ExposureAuto", "Off")
self.camera.setProperty("GainAuto", "Off")
if self.camera.hasProperty("pgrExposureCompensationAuto"):
self.camera.setProperty("pgrExposureCompensationAuto", "Off")
if self.camera.hasProperty("BlackLevelClampingEnable"):
self.camera.setProperty("BlackLevelClampingEnable", False)
if self.camera.hasProperty("SharpnessEnabled"):
self.camera.setProperty("SharpnessEnabled", False)
if self.camera.hasProperty("GammaEnabled"):
self.camera.setProperty("GammaEnabled", False)
#
# No idea what this means in the context of a black and white
# camera. We try and turn it off but that seems to be much
# harder to do than one would hope.
#
#self.camera.setProperty("OnBoardColorProcessEnabled", False)
# Verify that we have turned off some of these 'features'.
for feature in ["pgrDefectPixelCorrectionEnable",
"BlackLevelClampingEnable",
"SharpnessEnabled",
"GammaEnabled"]:
if self.camera.hasProperty(feature):
assert not self.camera.getProperty(feature).getValue()
# Configure camera to not use triggering.
#
self.camera.setProperty("TriggerMode", "Off")
# Configure acquisition parameters.
#
# Note: The order is important here.
#
for pname in ["BlackLevel", "Gain", "Height", "Width", "OffsetX", "OffsetY"]:
self.camera.setProperty(pname, parameters.get(pname))
# Use maximum exposure time allowed by desired frame rate. # , "AcquisitionFrameRate"
# Line below does not work with blackfly camera. Exposure time needs to be set explicitly
#self.camera.setProperty("ExposureTime", self.camera.getProperty("ExposureTime").getMaximum())
self.camera.setProperty("ExposureTime", 20000.0)
# Get current offsets.
#
self.cur_offsetx = self.camera.getProperty("OffsetX").getValue()
self.cur_offsety = self.camera.getProperty("OffsetY").getValue()
self.old_offsetx = self.cur_offsetx
self.old_offsety = self.cur_offsety
# Set maximum offsets.
#
self.max_offsetx = self.camera.getProperty("OffsetX").getMaximum()
self.max_offsety = self.camera.getProperty("OffsetY").getMaximum()
def adjustAOI(self, dx, dy):
tmp_x = self.cur_offsetx + dx
tmp_y = self.cur_offsety + dy
tmp_x = max(0, tmp_x)
tmp_x = min(self.max_offsetx, tmp_x)
tmp_y = max(0, tmp_y)
tmp_y = min(self.max_offsety, tmp_y)
#
# The thread loop will check for cur != old and update the camera values
# as necessary.
#
self.params_mutex.lock()
self.cur_offsetx = tmp_x
self.cur_offsety = tmp_y
self.params_mutex.unlock()
def adjustZeroDist(self, inc):
pass
def run(self):
self.camera.startAcquisition()
self.running = True
while(self.running):
[frames, frame_size] = self.camera.getFrames()
self.analyze(frames, frame_size)
# Check for AOI change.
self.params_mutex.lock()
if (self.old_offsetx != self.cur_offsetx) or (self.old_offsety != self.cur_offsety):
self.camera.stopAcquisition()
self.camera.setProperty("OffsetX", self.cur_offsetx)
self.camera.setProperty("OffsetY", self.cur_offsety)
self.camera.startAcquisition()
self.old_offsetx = self.cur_offsetx
self.old_offsety = self.cur_offsety
self.params_mutex.unlock()
self.msleep(5)
self.camera.stopAcquisition()
def startCamera(self):
self.start(QtCore.QThread.NormalPriority)
self.start_time = time.time()
def stopCamera(self, verbose = False):
if verbose:
fps = self.n_analyzed/(time.time() - self.start_time)
print(" > AF: Analyzed {0:d}, Dropped {1:d}, {2:.3f} FPS".format(self.n_analyzed, self.n_dropped, fps))
print(" > AF: OffsetX {0:d}, OffsetY {1:d}, ZeroD {2:.2f}".format(self.cur_offsetx, self.cur_offsety, self.zero_dist))
self.running = False
self.wait()
self.camera.shutdown()
class CameraQPD(LockCamera):
"""
QPD emulation class. The default camera ROI is 200x200 pixels.
The focus lock is configured so that there are two laser spots on the camera.
The distance between these spots is fit and the difference between this distance and the
zero distance is returned as the focus lock offset. The maximum value of the camera
pixels is returned as the focus lock sum.
"""
def __init__(self, parameters = None, **kwds):
kwds["parameters"] = parameters
super().__init__(**kwds)
# fixed parameters
self.x_off1 = 0.0
self.y_off1 = 0.0
self.x_off2 = 0.0
self.y_off2 = 0.0
self.image = None # will be loaded below
self.allow_single_fits = False # parameters.get("allow_single_fits") # False
self.sigma = parameters.get("sigma") # 5
self.background = parameters.get("background") # background
self.fit_size = parameters.get("fit_size")*self.sigma # 1.5, relative to sigma
# Some derived parameters
self.half_x = int(self.x_width/2)
self.half_y = int(self.y_width/2)
# maybe good things to add
'''
self.reps = parameters.get("reps")
self.sum_scale = parameters.get("sum_scale")
self.sum_zero = parameters.get("sum_zero")
self.good = numpy.zeros(self.reps, dtype = numpy.bool)
self.mag = numpy.zeros(self.reps)
self.x_off = numpy.zeros(self.reps)
self.y_off = numpy.zeros(self.reps)
'''
# def adjustAOI is defined above
def adjustZeroDist(self, inc):
self.params_mutex.lock()
self.zero_dist += 0.1*inc
self.params_mutex.unlock()
def getImage(self):
return [self.image, self.x_off1, self.y_off1, self.x_off2, self.y_off2, self.sigma]
def analyze(self, frames, frame_size):
# Only keep the last max_backlog frames if we are falling behind.
lf = len(frames)
if (lf>self.max_backlog):
self.n_dropped += lf - self.max_backlog
frames = frames[-self.max_backlog:]
for elt in frames:
self.n_analyzed += 1
frame = elt.getData().reshape(frame_size)
self.image = frame
image1 = frame[self.roi1]
image2 = frame[self.roi2]
def getZeroDist(self):
return self.zero_dist
def qpdScan(self, reps = 4):
"""
Returns [power, offset, is_good]
"""
power_total = 0.0
offset_total = 0.0
good_total = 0.0
for i in range(reps):
[power, n_good, offset] = self.singleQpdScan()
power_total += power
good_total += n_good
offset_total += offset
power_total = power_total/float(reps)
if (good_total > 0):
return [power_total, offset_total/good_total, True]
else:
return [power_total, 0, False]
def singleQpdScan(self):
"""
Perform a single measurement of the focus lock offset and camera sum signal.
Returns [power, total_good, offset]
"""
# Determine offset by fitting gaussians to the two beam spots.
# In the event that only beam spot can be fit then this will
# attempt to compensate. However this assumes that the two
# spots are centered across the mid-line of camera ROI.
#
# The frame most recently stored by analyze() is assumed to hold the data
# to fit; per the class docstring the lock sum is the maximum pixel value.
data = self.image
power = numpy.max(data)
[total_good, dist1, dist2] = self.doFit(data)
# Calculate offset.
# No good fits.
if (total_good == 0):
return [power, 0.0, 0.0]
# One good fit.
elif (total_good == 1):
if self.allow_single_fits:
return [power, 1.0, ((dist1 + dist2) - 0.5*self.zero_dist)]
else:
return [power, 0.0, 0.0]
# Two good fits. This gets twice the weight of one good fit
# if we are averaging.
else:
return [power, 2.0, 2.0*((dist1 + dist2) - self.zero_dist)]
class CameraQPDCorrFit(CameraQPD):
"""
This version uses storm-analyis to do the peak finding and
image correlation to do the peak fitting.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
assert (cl2DG is not None), "Correlation fitting not available."
self.fit_hl = None
self.fit_hr = None
def doFit(self, data):
dist1 = 0
dist2 = 0
self.x_off1 = 0.0
self.y_off1 = 0.0
self.x_off2 = 0.0
self.y_off2 = 0.0
if self.fit_hl is None:
roi_size = int(3.0 * self.sigma)
self.fit_hl = cl2DG.CorrLockFitter(roi_size = roi_size,
sigma = self.sigma,
threshold = 10)
self.fit_hr = cl2DG.CorrLockFitter(roi_size = roi_size,
sigma = self.sigma,
threshold = 10)
total_good = 0
[x1, y1, status] = self.fit_hl.findFitPeak(data[:,:self.half_x])
if status:
total_good += 1
self.x_off1 = x1 - self.half_y
self.y_off1 = y1 - self.half_x
dist1 = abs(self.y_off1)
[x2, y2, status] = self.fit_hr.findFitPeak(data[:,-self.half_x:])
if status:
total_good += 1
self.x_off2 = x2 - self.half_y
self.y_off2 = y2
dist2 = abs(self.y_off2)
return [total_good, dist1, dist2]
def shutDown(self):
super().shutDown()
if self.fit_hl is not None:
self.fit_hl.cleanup()
self.fit_hr.cleanup()
class AFLockCamera(LockCamera):
"""
This class works with the auto-focus hardware configuration.
In this configuration there are two spots that move horizontally
as the focus changes. The spots are shifted vertically so that
they don't overlap with each other.
"""
def __init__(self, parameters = None, **kwds):
kwds["parameters"] = parameters
super().__init__(**kwds)
self.cnt = 0
self.max_backlog = 20
self.min_good = parameters.get("min_good")
self.reps = parameters.get("reps")
self.sum_scale = parameters.get("sum_scale")
self.sum_zero = parameters.get("sum_zero")
self.good = numpy.zeros(self.reps, dtype = numpy.bool)
self.mag = numpy.zeros(self.reps)
self.x_off = numpy.zeros(self.reps)
self.y_off = numpy.zeros(self.reps)
# Create slices for selecting the appropriate regions from the camera.
# This is where the problem comes up
t1 = list(map(int, parameters.get("roi1").split(",")))
self.roi1 = (slice(t1[0], t1[1]), slice(t1[2], t1[3]))
t2 = list(map(int, parameters.get("roi2").split(",")))
self.roi2 = (slice(t2[0], t2[1]), slice(t2[2], t2[3]))
self.afc = afLC.AFLockC(offset = parameters.get("background"),
downsample = parameters.get("downsample"))
assert (self.reps >= self.min_good), "'reps' must be >= 'min_good'."
def adjustZeroDist(self, inc):
self.params_mutex.lock()
self.zero_dist += 0.001*inc
self.params_mutex.unlock()
def analyze(self, frames, frame_size):
# testing inputs
# frame_size = (1440,1080)
# the frames list has class objects inside, specifically:
# <class 'storm_control.sc_hardware.pointGrey.spinnaker.ScamData>
if False:
print('\n\n--------------- \ndef analyze\n--------------------')
print(f'type(frames)= {type(frames)}')
print(f'len(frames)= {len(frames)}')
if len(frames)> 0:
print(f'type(frames[0]) = { type(frames[0])}')
print(f'type(frame_size) = {type(frame_size)}')
print(f'frame_size = {frame_size}')
# Only keep the last max_backlog frames if we are falling behind.
lf = len(frames)
if (lf>self.max_backlog):
self.n_dropped += lf - self.max_backlog
frames = frames[-self.max_backlog:]
for elt in frames:
self.n_analyzed += 1
if False:
# testing what happens without the reshaping
print('saving frame_test1.npy ...')
frame_test1 = elt.getData()
numpy.save(r'C:\Users\STORM1\Desktop\focus_lock_debugging\frame_test1.npy',frame_test1)
# or reshaping using the inverted X and Y directions
print('saving frame_test2.npy ...')
frame_test2 = elt.getData().reshape((frame_size[1],frame_size[0]))
numpy.save(r'C:\Users\STORM1\Desktop\focus_lock_debugging\frame_test2.npy',frame_test2)
# Not sure why, but the dimensions are swapped
#frame = elt.getData().reshape(frame_size)
frame = elt.getData().reshape((frame_size[1],frame_size[0]))
image1 = frame[self.roi1]
image2 = frame[self.roi2]
# Debugging ROI shape issues
if False:
print('\n\n------------------------\nDebugging ROI shape issues')
print(f'self.roi1 = {self.roi1}')
print(f'self.roi2 = {self.roi2}')
print(f'frame.shape = {frame.shape}')
print(f'image1.shape = {image1.shape}')
print(f'image2.shape = {image2.shape}')
print(f'image1.dtype = {image1.dtype}')
print(f'image2.dtype = {image2.dtype}')
print('------------------------\n\n')
# Debugging image1 and image2 into findOffsetU16NM
if False:
# testing what happens without the reshaping
print('saving image1.npy and image2.npy...')
numpy.save(r'C:\Users\STORM1\Desktop\focus_lock_debugging\image1.npy',image1)
numpy.save(r'C:\Users\STORM1\Desktop\focus_lock_debugging\image2.npy',image2)
# This is the offending line
[x_off, y_off, success, mag] = self.afc.findOffsetU16NM(image1, image2, verbose = True)
if False:
print('\n\n------------------------\nDebugging findOffsetU16NM')
print(f'x_off = {x_off}')
print(f'y_off = {y_off}')
#self.bg_est[self.cnt] = frame[0,0]
self.good[self.cnt] = success
self.mag[self.cnt] = mag
self.x_off[self.cnt] = x_off
self.y_off[self.cnt] = y_off
# Check if we have all the samples we need.
self.cnt += 1
if (self.cnt == self.reps):
# Convert current frame to 8 bit image.
# 201218 seems like our camera data is 16 bit not 12 bit (even though ADC is 12 bit)
#image = numpy.right_shift(frame, 3).astype(numpy.uint8) #convert from 12 bit
image = numpy.right_shift(frame, 4).astype(numpy.uint8) #convert from 16 bit
#debugging save image to check how it looks.
# result: frame and image are already scrambled.
if False:
numpy.save(r'C:\Users\STORM1\Desktop\focus_lock_debugging\image.npy',image)
numpy.save(r'C:\Users\STORM1\Desktop\focus_lock_debugging\frame.npy',frame)
qpd_dict = {"is_good" : True,
"image" : image,
"offset" : 0.0,
"sum" : 0.0,
"x_off" : 0.0,
"y_off" : 0.0}
if (numpy.count_nonzero(self.good) < self.min_good):
qpd_dict["is_good"] = False
self.cameraUpdate.emit(qpd_dict)
else:
mag = numpy.mean(self.mag[self.good])
y_off = numpy.mean(self.y_off[self.good]) - self.zero_dist
qpd_dict["offset"] = y_off
qpd_dict["sum"] = self.sum_scale*mag - self.sum_zero
qpd_dict["x_off"] = numpy.mean(self.x_off[self.good])
qpd_dict["y_off"] = y_off
self.cameraUpdate.emit(qpd_dict)
self.cnt = 0
def stopCamera(self):
super().stopCamera()
self.afc.cleanup()
class SSLockCamera(LockCamera):
"""
This class works with the standard IR laser focus lock configuration.
In this configuration there is a single spot that moves horizontally
as the focus changes.
"""
def __init__(self, parameters = None, **kwds):
kwds["parameters"] = parameters
super().__init__(**kwds)
self.cnt = 0
self.max_backlog = 20
self.min_good = parameters.get("min_good")
self.offset = parameters.get("offset")
self.reps = parameters.get("reps")
self.sum_scale = parameters.get("sum_scale")
self.sum_zero = parameters.get("sum_zero")
self.good = numpy.zeros(self.reps, dtype = numpy.bool)
self.mag = numpy.zeros(self.reps)
self.x_off = numpy.zeros(self.reps)
self.y_off = numpy.zeros(self.reps)
self.lpf = saLPF.LockPeakFinder(offset = 0,
sigma = parameters.get("sigma"),
threshold = parameters.get("threshold"))
assert (self.reps >= self.min_good), "'reps' must be >= 'min_good'."
def adjustZeroDist(self, inc):
self.params_mutex.lock()
self.zero_dist += 0.1*inc
self.params_mutex.unlock()
def analyze(self, frames, frame_size):
# Only keep the last max_backlog frames if we are falling behind.
lf = len(frames)
if (lf>self.max_backlog):
self.n_dropped += lf - self.max_backlog
frames = frames[-self.max_backlog:]
for elt in frames:
self.n_analyzed += 1
frame = elt.getData().reshape(frame_size)
# self.offset is slightly below what the camera reads with no
# signal. We'll be doing MLE fitting so we can't tolerate
# negative values in 'frame'.
frame = frame - self.offset
# Magnitude calculation.
mag = numpy.max(frame) - numpy.mean(frame)
# Fit peak X/Y location.
[x_off, y_off, success] = self.lpf.findFitPeak(frame)
self.good[self.cnt] = success
self.mag[self.cnt] = mag
self.x_off[self.cnt] = x_off
self.y_off[self.cnt] = y_off
# Check if we have all the samples we need.
self.cnt += 1
if (self.cnt == self.reps):
# Convert current frame to 8 bit image.
image = numpy.right_shift(frame.astype(numpy.uint16), 3).astype(numpy.uint8)
mag = numpy.mean(self.mag)
qpd_dict = {"is_good" : True,
"image" : image,
"offset" : 0.0,
"sum" : self.sum_scale*mag - self.sum_zero,
"x_off" : 0.0,
"y_off" : 0.0}
if (numpy.count_nonzero(self.good) < self.min_good):
qpd_dict["is_good"] = False
self.cameraUpdate.emit(qpd_dict)
else:
y_off = numpy.mean(self.y_off[self.good]) - self.zero_dist
qpd_dict["offset"] = y_off
qpd_dict["x_off"] = numpy.mean(self.x_off[self.good])
qpd_dict["y_off"] = y_off
self.cameraUpdate.emit(qpd_dict)
self.cnt = 0
def stopCamera(self):
super().stopCamera()
self.lpf.cleanup()
#
# The MIT License
#
# Copyright (c) 2020 Babcock Lab, Harvard University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
|
py | b411ba6a30135cd01909cebcb27f379587cc20a2 | import os
import numpy as np
from sklearn import metrics
from config import *
def evaluation(labels_true_path, labels_pred_path):
"""
정답 레이블과 예측 레이블 파일을 읽어서 Adjusted Rand index를 계산하여 반환하는 함수입니다.
:param labels_true_path: 정답 이미지 레이블 파일
labels_true의 예:
01
01
01
02
02
03
...
:param labels_pred_path: 예측 이미지 레이블 파일
labels_pred의 예:
01
01
02
02
03
04
...
:return: Adjust Rand index (0~1) 계산하여 출력하고 점수를 반환함. 1에 가까울수록 정확
예:
Rand Index: 0.18918918918918917
참고: https://scikit-learn.org/stable/modules/clustering.html#adjusted-rand-index
"""
# loading labels
labels_true = np.loadtxt(labels_true_path, dtype=str)
labels_pred = np.loadtxt(labels_pred_path, dtype=str)
# compare labels
return metrics.adjusted_rand_score(labels_true, labels_pred)
if __name__ == '__main__':
"""
평가 예시:
labels_pred1, labels_pred2 두개 가상의 레이블 파일을 각각 평가해 봅니다.
labels_ture 01, 01, 01, 02, 02, 03
labels_pred1 01, 01, 02, 02, 03, 04
labels_pred2 01, 01, 01, 02, 03, 04
실행한 결과는 다음과 같습니다.
Score for labels_pred1.txt: 0.18918918918918917
Score for labels_pred2.txt: 0.8148148148148149
labels_pred2 가 좀 더 정확하기 때문에 점수가 높게 평가 됩니다.
"""
labels_pred1 = "labels_pred1.txt"
labels_pred2 = "labels_pred2.txt"
score1 = evaluation(os.path.join(DATA_DIR, LABELS_TRUE + ".txt"), os.path.join(DATA_DIR, labels_pred1))
score2 = evaluation(os.path.join(DATA_DIR, LABELS_TRUE + ".txt"), os.path.join(DATA_DIR, labels_pred2))
print("Score for %s: %s" % (labels_pred1, score1))
print("Score for %s: %s" % (labels_pred2, score2))
# if __name__ == '__main__':
# score = evaluation(os.path.join(DATA_DIR, LABELS_TRUE + ".txt"), os.path.join(DATA_DIR, LABELS_PRED + ".txt"))
# print("Rand Index: %s" % score)
|
py | b411bbd6aaedde91a49ba976a7e62643dc9e71b6 | ##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import imath
import tempfile
import shutil
import os
import IECore
import IECoreScene
class SWAReaderTest( unittest.TestCase ) :
def testConstruction( self ) :
r = IECoreScene.SWAReader()
self.assertEqual( r["fileName"].getTypedValue(), "" )
r = IECoreScene.SWAReader( os.path.join( "test", "IECore", "data", "swaFiles", "test.swa" ) )
self.assertEqual( r["fileName"].getTypedValue(), os.path.join( "test", "IECore", "data", "swaFiles", "test.swa" ) )
def testReading( self ) :
r = IECoreScene.SWAReader( os.path.join( "test", "IECore", "data", "swaFiles", "test.swa" ) )
o = r.read()
tempDir = tempfile.mkdtemp()
IECore.ObjectWriter( o, os.path.join(tempDir, "trees4.cob" ) ).write()
self.assertTrue( o.isInstanceOf( IECoreScene.PointsPrimitive.staticTypeId() ) )
self.assertEqual( o.numPoints, 5 + 6 )
self.assertTrue( o.arePrimitiveVariablesValid() )
self.assertTrue( "P" in o )
self.assertTrue( "xAxis" in o )
self.assertTrue( "yAxis" in o )
self.assertTrue( "zAxis" in o )
self.assertTrue( "scale" in o )
self.assertTrue( "treeName" in o )
self.assertTrue( "treeNameIndices" in o )
self.assertEqual( o["P"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( o["xAxis"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( o["yAxis"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( o["zAxis"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( o["scale"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( o["treeNameIndices"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( o["treeName"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Constant )
self.assertTrue( isinstance( o["P"].data, IECore.V3fVectorData ) )
self.assertTrue( isinstance( o["xAxis"].data, IECore.V3fVectorData ) )
self.assertTrue( isinstance( o["yAxis"].data, IECore.V3fVectorData ) )
self.assertTrue( isinstance( o["zAxis"].data, IECore.V3fVectorData ) )
self.assertTrue( isinstance( o["scale"].data, IECore.FloatVectorData ) )
self.assertTrue( isinstance( o["treeNameIndices"].data, IECore.IntVectorData ) )
self.assertTrue( isinstance( o["treeName"].data, IECore.StringVectorData ) )
self.assertEqual( o["treeName"].data, IECore.StringVectorData( [ "Acacia_RT", "BroadLeaf_HighDetail" ] ) )
self.assertEqual( o["P"].data[0], imath.V3f( 3750.05, 1556.86, -2149.22 ) )
self.assertEqual( o["yAxis"].data[0], imath.V3f( 0.0176831, 0.998519, 0.0514542 ) )
self.assertEqual( o["xAxis"].data[0], imath.V3f( 0.0179192, -0.0517705, 0.998498 ) )
self.assertEqual( o["zAxis"].data[0], o["xAxis"].data[0].cross( o["yAxis"].data[0] ) )
self.assertAlmostEqual( o["scale"].data[0], 6.4516, 6 )
self.assertAlmostEqual( o["scale"].data[1], 6.7, 6 )
self.assertEqual( o["treeNameIndices"].data, IECore.IntVectorData( [ 0 ] * 5 + [ 1 ] * 6 ) )
shutil.rmtree( tempDir )
def testCanRead( self ) :
self.assertTrue( IECoreScene.SWAReader.canRead( os.path.join( "test", "IECore", "data", "swaFiles", "test.swa" ) ) )
self.assertFalse( IECoreScene.IDXReader.canRead( os.path.join( "test", "IECore", "data", "cobFiles/ball.cob" ) ) )
self.assertFalse( IECoreScene.SWAReader.canRead( os.path.join( "test", "IECore", "data", "idxFiles", "test.idx" ) ) )
self.assertFalse( IECoreScene.SWAReader.canRead( os.path.join( "test", "IECore", "data", "empty" ) ) )
def testRegistration( self ) :
r = IECore.Reader.create( os.path.join( "test", "IECore", "data", "swaFiles", "test.swa" ) )
self.assertTrue( isinstance( r, IECoreScene.SWAReader ) )
if __name__ == "__main__":
unittest.main()
|
py | b411bc3b2f77e30f66d889dcae9d0b2fff4c6c7f | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of twin_sac, a mix of TD3 (https://arxiv.org/abs/1802.09477) and SAC (https://arxiv.org/abs/1801.01290, https://arxiv.org/abs/1812.05905).
Overall structure and hyperparameters are taken from TD3. However, the algorithm
itself represents a version of SAC.
"""
import typing
from typing import Optional
from dm_env import specs as dm_env_specs
import tensorflow as tf
import tensorflow_probability as tfp
from tf_agents.utils import object_identity
from representation_batch_rl.batch_rl.encoders import ImageEncoder
tfd = tfp.distributions
LOG_STD_MIN = -20
LOG_STD_MAX = 2
class BasePolicy(tf.keras.Model):
"""Base class for policies."""
def __init__(self,
state_dim,
action_dim,
action_spec,
hidden_dims = (256, 256),
eps = 1e-6):
"""Creates an actor.
Args:
state_dim: State size.
action_dim: Action size.
action_spec: Action spec.
hidden_dims: List of hidden dimensions.
eps: Epsilon for numerical stability.
"""
super().__init__()
relu_gain = tf.math.sqrt(2.0)
relu_orthogonal = tf.keras.initializers.Orthogonal(relu_gain)
near_zero_orthogonal = tf.keras.initializers.Orthogonal(1e-2)
layers = []
for hidden_dim in hidden_dims:
layers.append(
tf.keras.layers.Dense(
hidden_dim,
activation=tf.nn.relu,
kernel_initializer=relu_orthogonal))
inputs = tf.keras.Input(shape=(state_dim,))
outputs = tf.keras.Sequential(
layers + [tf.keras.layers.Dense(
action_dim, kernel_initializer=near_zero_orthogonal)]
)(inputs)
self.trunk = tf.keras.Model(inputs=inputs, outputs=outputs)
self.action_spec = action_spec
self.action_mean = tf.constant(
(action_spec.maximum + action_spec.minimum) / 2.0, dtype=tf.float32)
self.action_scale = tf.constant(
(action_spec.maximum - action_spec.minimum) / 2.0, dtype=tf.float32)
self.eps = eps
class MixtureGuassianPolicy(BasePolicy):
"""Gaussian policy with TanH squashing."""
def __init__(self, state_dim,
action_spec,
hidden_dims = (256, 256),
num_components = 5,
encoder = None):
super().__init__(
state_dim,
num_components * action_spec.shape[0] * 3,
action_spec,
hidden_dims=hidden_dims)
self._num_components = num_components
self.encoder = encoder
self.n_actions = action_spec.shape[0]
def _get_dist_and_mode(
self,
states,
out = None,
stddev = 1.0):
"""Returns a tf.Distribution for given states modes of this distribution.
Args:
states: Batch of states.
out: Batch of neural net outputs.
stddev: Standard deviation of sampling distribution.
"""
if self.encoder is not None:
states = self.encoder(states)
if out is None:
out = self.trunk(states)
logits, mu, log_std = tf.split(out, num_or_size_splits=3, axis=1)
log_std = tf.clip_by_value(log_std, LOG_STD_MIN, LOG_STD_MAX)
std = tf.exp(log_std)
shape = [tf.shape(std)[0], self.n_actions, self._num_components]
logits = tf.reshape(logits, shape)
mu = tf.reshape(mu, shape)
std = tf.reshape(std, shape)
components_distribution = tfd.TransformedDistribution(
tfd.Normal(loc=mu, scale=std),
tfp.bijectors.Chain([
tfp.bijectors.Shift(
shift=tf.transpose(
tf.stack(self._num_components * [self.action_mean]))),
tfp.bijectors.Scale(
scale=tf.transpose(
tf.stack(self._num_components * [self.action_scale]))),
tfp.bijectors.Tanh(),
]))
distribution = tfd.MixtureSameFamily(
mixture_distribution=tfd.Categorical(logits=logits),
components_distribution=components_distribution)
return tfd.Independent(distribution)
@tf.function
def call(
self,
states,
out = None,
sample = False,
with_log_probs = False
):
"""Computes actions for given inputs.
Args:
states: Batch of states.
out: Batch of neural net outputs.
sample: Whether to sample actions.
with_log_probs: Whether to return log probability of sampled actions.
Returns:
Sampled actions.
"""
if self.encoder is not None:
states = self.encoder(states)
if sample:
dist = self._get_dist_and_mode(states, out)
else:
dist = self._get_dist_and_mode(states, out, stddev=0.0)
actions = dist.sample()
if with_log_probs:
return actions, dist.log_prob(actions)
else:
return actions
@tf.function
def log_probs(
self,
states,
actions,
out = None,
with_entropy = False
):
actions = tf.clip_by_value(actions, self.action_spec.minimum + self.eps,
self.action_spec.maximum - self.eps)
dist = self._get_dist_and_mode(states, out)
sampled_actions = dist.sample()
sampled_actions = tf.clip_by_value(sampled_actions,
self.action_spec.minimum + self.eps,
self.action_spec.maximum - self.eps)
if with_entropy:
return dist.log_prob(actions), -dist.log_prob(sampled_actions)
else:
return dist.log_prob(actions)
class DiagGuassianPolicy(BasePolicy):
"""Gaussian policy with TanH squashing."""
def __init__(self, state_dim,
action_spec,
hidden_dims = (256, 256),
encoder = None):
super().__init__(state_dim, action_spec.shape[0] * 2, action_spec,
hidden_dims=hidden_dims)
self.encoder = encoder
def _get_dist_and_mode(
self,
states,
out = None,
stddev = 1.0):
"""Returns a tf.Distribution for given states modes of this distribution.
Args:
states: Batch of states.
out: Batch of neural net outputs.
stddev: Standard deviation of sampling distribution.
"""
if out is None:
out = self.trunk(states)
mu, log_std = tf.split(out, num_or_size_splits=2, axis=1)
log_std = tf.clip_by_value(log_std, LOG_STD_MIN, LOG_STD_MAX)
std = tf.exp(log_std)
dist = tfd.TransformedDistribution(
tfd.MultivariateNormalDiag(loc=mu, scale_diag=std * stddev),
tfp.bijectors.Chain([
tfp.bijectors.Shift(shift=self.action_mean),
tfp.bijectors.Scale(scale=self.action_scale),
tfp.bijectors.Tanh(),
]))
return dist
@tf.function
def call(
self,
states,
out = None,
sample = False,
with_log_probs = False
):
"""Computes actions for given inputs.
Args:
states: Batch of states.
out: Batch of neural net outputs.
sample: Whether to sample actions.
with_log_probs: Whether to return log probability of sampled actions.
Returns:
Sampled actions.
"""
if self.encoder is not None:
states = self.encoder(states)
if sample:
dist = self._get_dist_and_mode(states, out)
else:
dist = self._get_dist_and_mode(states, out, stddev=0.0)
actions = dist.sample()
if with_log_probs:
return actions, dist.log_prob(actions)
else:
return actions
@tf.function
def log_probs(
self,
states,
actions,
out = None,
with_entropy = False
):
if self.encoder is not None:
states = self.encoder(states)
actions = tf.clip_by_value(actions, self.action_spec.minimum + self.eps,
self.action_spec.maximum - self.eps)
dist = self._get_dist_and_mode(states, out)
sampled_actions = dist.sample()
sampled_actions = tf.clip_by_value(sampled_actions,
self.action_spec.minimum + self.eps,
self.action_spec.maximum - self.eps)
if with_entropy:
return dist.log_prob(actions), -dist.log_prob(sampled_actions)
else:
return dist.log_prob(actions)
@property
def trainable_variables(self):
tvars = super(DiagGuassianPolicy, self).trainable_variables
if self.encoder is None:
return tvars
else:
# Remove the encoder conv2d variables (Policy shouldn't update the conv2d
# vars). Note that a call to stop_gradient on the fprop isn't enough to
# ensure that this is the case, this is because conv2d vars are shared
# with the critic and so they can get updated when bpropping through the
# critic to minimize the actor loss.
encoder_variables = object_identity.ObjectIdentitySet(
self.encoder.conv_stack.trainable_variables)
return [v for v in tvars if v not in encoder_variables]
class DeterministicPolicy(BasePolicy):
"""Deterministic policy with TanH squashing."""
def __init__(self, state_dim, action_spec,
stddev):
"""Creates a deterministic policy.
Args:
state_dim: State size.
action_spec: Action spec.
stddev: Noise scale.
"""
super().__init__(state_dim, action_spec.shape[0], action_spec)
self._noise = tfd.Normal(loc=0.0, scale=stddev)
@tf.function
def call(
self,
states,
sample = False
):
"""Computes actions for given inputs.
Args:
states: Batch of states.
sample: Whether to sample actions.
Returns:
Mode actions, sampled actions.
"""
actions = tf.nn.tanh(self.trunk(states))
if sample:
actions = actions + self._noise.sample(actions.shape)
actions = tf.clip_by_value(actions, -1.0, 1.0)
return (actions + self.action_mean) * self.action_scale
class CVAEPolicyPixels(BasePolicy):
"""Conditional variational autoencoder."""
def __init__(self, state_dim, action_spec, latent_dim, encoder):
"""Creates an actor.
Args:
state_dim: State size.
action_spec: Action spec.
latent_dim: Size of latent space.
encoder: optional encoder
Returns:
None
"""
action_dim = action_spec.shape[0]
super().__init__(state_dim, action_dim, action_spec)
del self.trunk
del self.eps
self.latent_dim = latent_dim
relu_gain = tf.math.sqrt(2.0)
relu_orthogonal = tf.keras.initializers.Orthogonal(relu_gain)
self.obs_encoder = encoder
latent_state_dim = 50 # dimension of self.obs_encoder's output
self.encoder = tf.keras.Sequential([
tf.keras.layers.Dense(
750,
input_dim=latent_state_dim + action_dim,
activation='relu',
kernel_initializer=relu_orthogonal),
tf.keras.layers.Dense(
latent_dim + latent_dim, kernel_initializer='orthogonal'),
])
self.decoder = tf.keras.Sequential([
tf.keras.layers.Dense(
750,
input_dim=latent_state_dim + latent_dim,
activation='relu',
kernel_initializer=relu_orthogonal),
tf.keras.layers.Dense(
750, activation='relu', kernel_initializer=relu_orthogonal),
tf.keras.layers.Dense(action_dim, kernel_initializer='orthogonal'),
])
@tf.function
def sample(self, states):
eps = tf.random.normal(shape=(tf.shape(states)[0], self.latent_dim))
return self.decode(states, eps)
def encode(self, states, actions):
phi_s = self.obs_encoder(states)
inputs = tf.concat([phi_s, actions], -1)
mean, logvar = tf.split(self.encoder(inputs),
num_or_size_splits=2, axis=1)
logvar = tf.clip_by_value(logvar, -4, 15)
return mean, logvar
def reparameterize(self, mean, logvar):
eps = tf.random.normal(shape=tf.shape(mean))
return eps * tf.exp(logvar * 0.5) + mean
def decode(self, states, z):
phi_s = tf.stop_gradient(self.obs_encoder(states))
inputs = tf.concat([phi_s, z], -1)
outputs = self.decoder(inputs)
outputs = tf.tanh(outputs)
return (outputs + self.action_mean) * self.action_scale
@tf.function
def call(self,
states,
sample = True):
"""Computes actions for given inputs.
Args:
states: Batch of states.
sample: Whether to sample actions.
Returns:
Mode actions, sampled actions.
"""
assert sample, 'CVAE cannot be called without sampling'
return self.sample(states)
class CategoricalPolicy(BasePolicy):
"""Categorical policy."""
def __init__(self, state_dim,
action_spec,
hidden_dims = (256, 256),
encoder = None):
super().__init__(state_dim, action_spec.maximum.item()+1, action_spec,
hidden_dims=hidden_dims)
self.encoder = encoder
def _get_dist_and_mode(
self,
states,
out = None,
stddev = 1.0):
"""Returns a tf.Distribution for given states modes of this distribution.
Args:
states: Batch of states.
out: Batch of neural net outputs.
stddev: Batch of stddevs
Returns:
dist
"""
if out is None:
out = self.trunk(states)
dist = tfd.Categorical(logits=out)
# dist = tfa_d.shifted_categorical.ShiftedCategorical(logits=out,shift=0)
return dist
@tf.function
def call(
self,
states,
out = None,
sample = False,
with_log_probs = False
):
"""Computes actions for given inputs.
Args:
states: Batch of states.
out: Batch of neural net outputs.
sample: Whether to sample actions.
with_log_probs: Whether to return log probability of sampled actions.
Returns:
Sampled actions.
"""
if self.encoder is not None:
states = self.encoder(states)
dist = self._get_dist_and_mode(states, out)
if sample:
# actions ~ pi(phi(s))
actions = dist.sample()
else:
# argmax action
actions = dist.mode()
if with_log_probs:
return actions, dist.log_prob(actions)
else:
return actions
@tf.function
def log_probs(
self,
states,
actions,
out = None,
with_entropy = False
):
if self.encoder is not None:
states = self.encoder(states)
dist = self._get_dist_and_mode(states, out)
sampled_actions = dist.sample()
if with_entropy:
return dist.log_prob(actions), -dist.log_prob(sampled_actions)
else:
return dist.log_prob(actions)
@property
def trainable_variables(self):
tvars = super(CategoricalPolicy, self).trainable_variables
# if self.encoder is None:
# return tvars
# else:
# # Remove the encoder conv2d variables (Policy shouldn't update the conv
# # vars). Note that a call to stop_gradient on the fprop isn't enough to
# # ensure that this is the case, this is because conv2d vars are shared
# # with the critic and so they can get updated when bpropping through the
# # critic to minimze the actor loss.
# encoder_variables = object_identity.ObjectIdentitySet(
# self.encoder.conv_stack.trainable_variables)
return tvars
class CVAEPolicyPixelsDiscrete(BasePolicy):
"""Conditional variational autoencoder."""
def __init__(self, state_dim, action_spec, latent_dim, encoder):
"""Creates an actor.
Args:
state_dim: State size.
action_spec: Action spec.
latent_dim: Size of latent space.
encoder: optional encoder arg
Returns:
None
"""
action_dim = action_spec.maximum.item()+1
super().__init__(state_dim, action_dim, action_spec)
del self.trunk
del self.eps
self.latent_dim = latent_dim
relu_gain = tf.math.sqrt(2.0)
relu_orthogonal = tf.keras.initializers.Orthogonal(relu_gain)
self.obs_encoder = encoder
latent_state_dim = 50 # dimension of self.obs_encoder's output
self.encoder = tf.keras.Sequential([
tf.keras.layers.Dense(
750,
input_dim=latent_state_dim + action_dim,
activation='relu',
kernel_initializer=relu_orthogonal),
tf.keras.layers.Dense(
latent_dim + latent_dim, kernel_initializer='orthogonal'),
])
self.decoder = tf.keras.Sequential([
tf.keras.layers.Dense(
750,
input_dim=latent_state_dim + latent_dim,
activation='relu',
kernel_initializer=relu_orthogonal),
tf.keras.layers.Dense(
750, activation='relu', kernel_initializer=relu_orthogonal),
tf.keras.layers.Dense(action_dim, kernel_initializer='orthogonal'),
])
@tf.function
def sample(self, states):
eps = tf.random.normal(shape=(tf.shape(states)[0], self.latent_dim))
return self.decode(states, eps)
def encode(self, states, actions):
phi_s = self.obs_encoder(states)
inputs = tf.concat([phi_s, actions], -1)
mean, logvar = tf.split(self.encoder(inputs),
num_or_size_splits=2, axis=1)
logvar = tf.clip_by_value(logvar, -4, 15)
return mean, logvar
def reparameterize(self, mean, logvar):
eps = tf.random.normal(shape=tf.shape(mean))
return eps * tf.exp(logvar * 0.5) + mean
def decode(self, states, z):
phi_s = tf.stop_gradient(self.obs_encoder(states))
inputs = tf.concat([phi_s, z], -1)
outputs = self.decoder(inputs)
return outputs
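# Hypothetical usage sketch for DiagGuassianPolicy above; the state size,
# action bounds and batch shape are illustrative.
#
# import numpy as np
# spec = dm_env_specs.BoundedArray(shape=(6,), dtype=np.float32,
#                                  minimum=-1.0, maximum=1.0)
# policy = DiagGuassianPolicy(state_dim=17, action_spec=spec)
# states = tf.zeros([32, 17], dtype=tf.float32)
# actions, log_probs = policy(states, sample=True, with_log_probs=True)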
|
py | b411bd04cddce8b7d1dc5950f9abbdb26f9e605d | # Copyright (c) 2004-2012 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:[email protected]
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""diagram objects
"""
from logilab import astng
from pylint.pyreverse.utils import is_interface, FilterMixIn
def set_counter(value):
"""Figure counter (re)set"""
Figure._UID_COUNT = value
class Figure:
"""base class for counter handling"""
_UID_COUNT = 0
def __init__(self):
Figure._UID_COUNT += 1
self.fig_id = Figure._UID_COUNT
class Relationship(Figure):
"""a relation ship from an object in the diagram to another
"""
def __init__(self, from_object, to_object, relation_type, name=None):
Figure.__init__(self)
self.from_object = from_object
self.to_object = to_object
self.type = relation_type
self.name = name
class DiagramEntity(Figure):
"""a diagram object, i.e. a label associated to an astng node
"""
def __init__(self, title='No name', node=None):
Figure.__init__(self)
self.title = title
self.node = node
class ClassDiagram(Figure, FilterMixIn):
"""main class diagram handling
"""
TYPE = 'class'
def __init__(self, title, mode):
FilterMixIn.__init__(self, mode)
Figure.__init__(self)
self.title = title
self.objects = []
self.relationships = {}
self._nodes = {}
self.depends = []
def add_relationship(self, from_object, to_object,
relation_type, name=None):
"""create a relation ship
"""
rel = Relationship(from_object, to_object, relation_type, name)
self.relationships.setdefault(relation_type, []).append(rel)
def get_relationship(self, from_object, relation_type):
"""return a relation ship or None
"""
for rel in self.relationships.get(relation_type, ()):
if rel.from_object is from_object:
return rel
raise KeyError(relation_type)
def get_attrs(self, node):
"""return visible attributes, possibly with class name"""
attrs = []
for node_name, ass_nodes in node.instance_attrs_type.items() + \
node.locals_type.items():
if not self.show_attr(node_name):
continue
names = self.class_names(ass_nodes)
if names:
node_name = "%s : %s" % (node_name, ", ".join(names))
attrs.append(node_name)
return attrs
def get_methods(self, node):
"""return visible methods"""
return [m for m in node.values()
if isinstance(m, astng.Function) and self.show_attr(m.name)]
def add_object(self, title, node):
"""create a diagram object
"""
assert node not in self._nodes
ent = DiagramEntity(title, node)
self._nodes[node] = ent
self.objects.append(ent)
def class_names(self, nodes):
"""return class names if needed in diagram"""
names = []
for ass_node in nodes:
if isinstance(ass_node, astng.Instance):
ass_node = ass_node._proxied
if isinstance(ass_node, astng.Class) \
and hasattr(ass_node, "name") and not self.has_node(ass_node):
if ass_node.name not in names:
ass_name = ass_node.name
names.append(ass_name)
return names
def nodes(self):
"""return the list of underlying nodes
"""
return self._nodes.keys()
def has_node(self, node):
"""return true if the given node is included in the diagram
"""
return node in self._nodes
def object_from_node(self, node):
"""return the diagram object mapped to node
"""
return self._nodes[node]
def classes(self):
"""return all class nodes in the diagram"""
return [o for o in self.objects if isinstance(o.node, astng.Class)]
def classe(self, name):
"""return a class by its name, raise KeyError if not found
"""
for klass in self.classes():
if klass.node.name == name:
return klass
raise KeyError(name)
def extract_relationships(self):
"""extract relation ships between nodes in the diagram
"""
for obj in self.classes():
node = obj.node
obj.attrs = self.get_attrs(node)
obj.methods = self.get_methods(node)
# shape
if is_interface(node):
obj.shape = 'interface'
else:
obj.shape = 'class'
# inheritance link
for par_node in node.ancestors(recurs=False):
try:
par_obj = self.object_from_node(par_node)
self.add_relationship(obj, par_obj, 'specialization')
except KeyError:
continue
# implements link
for impl_node in node.implements:
try:
impl_obj = self.object_from_node(impl_node)
self.add_relationship(obj, impl_obj, 'implements')
except KeyError:
continue
# associations link
for name, values in node.instance_attrs_type.items() + \
node.locals_type.items():
for value in values:
if value is astng.YES:
continue
if isinstance( value, astng.Instance):
value = value._proxied
try:
ass_obj = self.object_from_node(value)
self.add_relationship(ass_obj, obj, 'association', name)
except KeyError:
continue
class PackageDiagram(ClassDiagram):
"""package diagram handling
"""
TYPE = 'package'
def modules(self):
"""return all module nodes in the diagram"""
return [o for o in self.objects if isinstance(o.node, astng.Module)]
def module(self, name):
"""return a module by its name, raise KeyError if not found
"""
for mod in self.modules():
if mod.node.name == name:
return mod
raise KeyError(name)
def get_module(self, name, node):
"""return a module by its name, looking also for relative imports;
raise KeyError if not found
"""
for mod in self.modules():
mod_name = mod.node.name
if mod_name == name:
return mod
#search for fullname of relative import modules
package = node.root().name
if mod_name == "%s.%s" % (package, name):
return mod
if mod_name == "%s.%s" % (package.rsplit('.', 1)[0], name):
return mod
raise KeyError(name)
def add_from_depend(self, node, from_module):
"""add dependencies created by from-imports
"""
mod_name = node.root().name
obj = self.module( mod_name )
if from_module not in obj.node.depends:
obj.node.depends.append(from_module)
def extract_relationships(self):
"""extract relation ships between nodes in the diagram
"""
ClassDiagram.extract_relationships(self)
for obj in self.classes():
# ownership
try:
mod = self.object_from_node(obj.node.root())
self.add_relationship(obj, mod, 'ownership')
except KeyError:
continue
for obj in self.modules():
obj.shape = 'package'
# dependencies
for dep_name in obj.node.depends:
try:
dep = self.get_module(dep_name, obj.node)
except KeyError:
continue
self.add_relationship(obj, dep, 'depends')
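# Hypothetical usage sketch: the astng module/class nodes are assumed to have
# been resolved elsewhere (e.g. by pyreverse's linker) before this is called,
# and the title and mode values are illustrative.
#
# diagram = ClassDiagram('classes of my_project', 'ALL')
# diagram.add_object(klass_node.name, klass_node)
# diagram.extract_relationships()
# for rel in diagram.relationships.get('specialization', ()):
#     print(rel.from_object.title, '->', rel.to_object.title)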
|
py | b411bd6d8f77d6364135669100752d9fee4d9719 | import os
from .base import *
DEBUG = os.getenv('DEBUG') == 'enable'
SECRET_KEY = os.environ.get('SECRET_KEY')
ALLOWED_HOSTS = ['*']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASSWORD'),
'HOST': os.environ.get('DB_HOST'),
'PORT': os.environ.get('DB_PORT'),
'CONN_MAX_AGE': int(os.getenv('DB_CONN_MAX_AGE', '0')),
'OPTIONS': {
'sslmode': os.environ.get('DB_SSL_MODE', 'require')
}
}
}
WAGTAILTRANSFER_SECRET_KEY = os.environ.get('WAGTAILTRANSFER_SECRET_KEY')
WAGTAILTRANSFER_SOURCES = {
os.environ.get('WAGTAILTRANSFER_SOURCE_NAME', 'default'): {
'BASE_URL': os.environ.get('WAGTAILTRANSFER_SOURCE_BASE_URL'),
'SECRET_KEY': os.environ.get('WAGTAILTRANSFER_SOURCE_SECRET_KEY'),
},
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'root': {
'handlers': ['console'],
'level': os.getenv('LOG_LEVEL', 'INFO')
},
}
SITE_VERSION = '2.0.20'
try:
from .local import *
except ImportError:
pass
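# A hypothetical environment for this settings module (values are placeholders;
# every variable below is read somewhere above):
#
#   DEBUG=enable
#   SECRET_KEY=change-me
#   DB_NAME=app
#   DB_USER=app
#   DB_PASSWORD=secret
#   DB_HOST=localhost
#   DB_PORT=5432
#   DB_SSL_MODE=require
#   WAGTAILTRANSFER_SECRET_KEY=change-me-too
#   WAGTAILTRANSFER_SOURCE_BASE_URL=https://example.com/wagtail-transfer/
#   WAGTAILTRANSFER_SOURCE_SECRET_KEY=source-key
#   LOG_LEVEL=INFO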
|
py | b411bfda5e68a19ae4d564edc060027a01280b67 | """
EXAMPLE - HTTP Controller referenced from example swagger spec.
"""
import flask
import logging
from typing import Union, Dict, List, Tuple, Optional, Any # noqa # pylint: disable=unused-import
from functools import wraps
import threading
import copy
from tackle.rest_api import wrapper_util
JSONIterableType = Union[Dict[str, Any], List[Any]]
JSONType = Union[str, int, float, bool, None, JSONIterableType]
def api_key_auth(key, required_scopes=None):
"""
Function pointed to by x-apikeyInfoFunc in the swagger security definitions.
"""
if key is not None:
# Pretty much a null implementation.
return {'sub': 'unknown'}
else:
return None
def get_auth_token():
"""
Retrieve the auth token provided in the request header. Allow both AUTH_TOKEN and X-Auth-Token headers.
"""
auth_token = flask.request.headers.get('AUTH_TOKEN')
if auth_token is None:
auth_token = flask.request.headers.get('X-Auth-Token')
return auth_token
def get_caller_name():
"""
Retrieve the caller name provided in the request header.
"""
caller_name = flask.request.headers.get('X-Caller')
return caller_name
controller_decorator_call_count = 0
controller_decorator_call_count_lock = threading.Lock()
def controller_decorator(f):
"""
Decorator to add usage header to response and to log info on the controller.
"""
@wraps(f)
def decorated_f(*args, **kwargs):
global controller_decorator_call_count
with controller_decorator_call_count_lock:
controller_decorator_call_count += 1
local_controller_decorator_call_count = copy.deepcopy(controller_decorator_call_count)
logging.info(f"flask_controller_request: ({local_controller_decorator_call_count}) {f.__name__} <- {str(args)} {str(kwargs)}")
# request_data = flask.request.data
# remote_addr = flask.request.remote_addr
# logging.info(f'flask_controller_request (raw data): "{request_data}" from {remote_addr}')
response_json, response_code = f(*args, **kwargs)
call_count_tuple = wrapper_util.auth_token_call_cache.get(get_auth_token()) # count, limit
if call_count_tuple is not None:
if call_count_tuple[1] is None:
call_count_remaining = -1 # Implies unlimited remaining.
else:
call_count_remaining = max(call_count_tuple[1] - call_count_tuple[0], 0) # Don't return a value <0
else:
call_count_remaining = 0 # Zero remaining; auth token not found?
response_json_str = str(response_json)
if len(response_json_str) > 300:
response_json_str = response_json_str[:297] + '...'
logging.info(f"flask_controller_response: ({local_controller_decorator_call_count}) {f.__name__} -> "
f"{str((response_json_str, response_code, {'X-RateLimit-Remaining': call_count_remaining}))}\n")
return response_json, response_code, {"X-RateLimit-Remaining": call_count_remaining}
return decorated_f
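# Hypothetical usage sketch: a swagger/connexion controller wrapped with the
# decorator above; the handler name and payload are illustrative. The handler
# returns (json, status_code) and the decorator adds the X-RateLimit-Remaining
# header to the response.
#
# @controller_decorator
# def get_health():
#     return {"status": "ok"}, 200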
|
py | b411c02ae0d7f1597be94992269f6f82d29767a2 | #!/usr/bin/python
"""
Application Layer
"""
from time import time as _time
from .debugging import ModuleLogger, DebugContents, bacpypes_debugging
from .comm import Client, ServiceAccessPoint, ApplicationServiceElement
from .task import OneShotTask
from .pdu import Address
from .apdu import encode_max_segments_accepted, decode_max_segments_accepted, \
encode_max_apdu_length_accepted, decode_max_apdu_length_accepted, \
AbortPDU, AbortReason, ComplexAckPDU, \
ConfirmedRequestPDU, Error, ErrorPDU, RejectPDU, SegmentAckPDU, \
SimpleAckPDU, UnconfirmedRequestPDU, apdu_types, \
unconfirmed_request_types, confirmed_request_types, complex_ack_types, \
error_types
from .errors import RejectException, AbortException, UnrecognizedService
# some debugging
_debug = 0
_log = ModuleLogger(globals())
#
# SSM - Segmentation State Machine
#
# transaction states
IDLE = 0
SEGMENTED_REQUEST = 1
AWAIT_CONFIRMATION = 2
AWAIT_RESPONSE = 3
SEGMENTED_RESPONSE = 4
SEGMENTED_CONFIRMATION = 5
COMPLETED = 6
ABORTED = 7
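# A rough sketch of the client-side flow (not exhaustive): IDLE -> SEGMENTED_REQUEST
# (while sending a segmented request) -> AWAIT_CONFIRMATION -> SEGMENTED_CONFIRMATION
# (if the reply itself is segmented) -> COMPLETED, with ABORTED on failure; the
# server side mirrors this with AWAIT_RESPONSE and SEGMENTED_RESPONSE.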
class SSM(OneShotTask, DebugContents):
transactionLabels = ['IDLE'
, 'SEGMENTED_REQUEST', 'AWAIT_CONFIRMATION', 'AWAIT_RESPONSE'
, 'SEGMENTED_RESPONSE', 'SEGMENTED_CONFIRMATION', 'COMPLETED', 'ABORTED'
]
_debug_contents = ('ssmSAP', 'localDevice', 'device_info', 'invokeID'
, 'state', 'segmentAPDU', 'segmentSize', 'segmentCount', 'maxSegmentsAccepted'
, 'retryCount', 'segmentRetryCount', 'sentAllSegments', 'lastSequenceNumber'
, 'initialSequenceNumber', 'actualWindowSize', 'proposedWindowSize'
)
def __init__(self, sap, pdu_address):
"""Common parts for client and server segmentation."""
if _debug: SSM._debug("__init__ %r %r", sap, pdu_address)
OneShotTask.__init__(self)
self.ssmSAP = sap # service access point
# save the address and get the device information
self.pdu_address = pdu_address
self.device_info = sap.deviceInfoCache.get_device_info(pdu_address)
self.invokeID = None # invoke ID
self.state = IDLE # initial state
self.segmentAPDU = None # refers to request or response
self.segmentSize = None # how big the pieces are
self.segmentCount = None
self.retryCount = None
self.segmentRetryCount = None
self.sentAllSegments = None
self.lastSequenceNumber = None
self.initialSequenceNumber = None
self.actualWindowSize = None
# local device object provides these or SAP provides defaults, make
# copies here so they are consistent throughout the transaction but
# they could change from one transaction to the next
self.numberOfApduRetries = getattr(sap.localDevice, 'numberOfApduRetries', sap.numberOfApduRetries)
self.apduTimeout = getattr(sap.localDevice, 'apduTimeout', sap.apduTimeout)
self.segmentationSupported = getattr(sap.localDevice, 'segmentationSupported', sap.segmentationSupported)
self.segmentTimeout = getattr(sap.localDevice, 'segmentTimeout', sap.segmentTimeout)
self.maxSegmentsAccepted = getattr(sap.localDevice, 'maxSegmentsAccepted', sap.maxSegmentsAccepted)
self.maxApduLengthAccepted = getattr(sap.localDevice, 'maxApduLengthAccepted', sap.maxApduLengthAccepted)
def start_timer(self, msecs):
if _debug: SSM._debug("start_timer %r", msecs)
# if this is active, pull it
if self.isScheduled:
if _debug: SSM._debug(" - is scheduled")
self.suspend_task()
# now install this
self.install_task(delta=msecs / 1000.0)
def stop_timer(self):
if _debug: SSM._debug("stop_timer")
# if this is active, pull it
if self.isScheduled:
if _debug: SSM._debug(" - is scheduled")
self.suspend_task()
def restart_timer(self, msecs):
if _debug: SSM._debug("restart_timer %r", msecs)
# if this is active, pull it
if self.isScheduled:
if _debug: SSM._debug(" - is scheduled")
self.suspend_task()
# now install this
self.install_task(delta=msecs / 1000.0)
def set_state(self, newState, timer=0):
"""This function is called when the derived class wants to change state."""
if _debug: SSM._debug("set_state %r (%s) timer=%r", newState, SSM.transactionLabels[newState], timer)
# make sure we have a correct transition
if (self.state == COMPLETED) or (self.state == ABORTED):
e = RuntimeError("invalid state transition from %s to %s" % (SSM.transactionLabels[self.state], SSM.transactionLabels[newState]))
SSM._exception(e)
raise e
# stop any current timer
self.stop_timer()
# make the change
self.state = newState
# if another timer should be started, start it
if timer:
self.start_timer(timer)
def set_segmentation_context(self, apdu):
"""This function is called to set the segmentation context."""
if _debug: SSM._debug("set_segmentation_context %s", repr(apdu))
# set the context
self.segmentAPDU = apdu
def get_segment(self, indx):
"""This function returns an APDU coorisponding to a particular
segment of a confirmed request or complex ack. The segmentAPDU
is the context."""
if _debug: SSM._debug("get_segment %r", indx)
# check for no context
if not self.segmentAPDU:
raise RuntimeError("no segmentation context established")
# check for invalid segment number
if indx >= self.segmentCount:
raise RuntimeError("invalid segment number {0}, APDU has {1} segments".format(indx, self.segmentCount))
if self.segmentAPDU.apduType == ConfirmedRequestPDU.pduType:
if _debug: SSM._debug(" - confirmed request context")
segAPDU = ConfirmedRequestPDU(self.segmentAPDU.apduService)
segAPDU.apduMaxSegs = encode_max_segments_accepted(self.maxSegmentsAccepted)
segAPDU.apduMaxResp = encode_max_apdu_length_accepted(self.maxApduLengthAccepted)
segAPDU.apduInvokeID = self.invokeID
# segmented response accepted?
segAPDU.apduSA = self.segmentationSupported in ('segmentedReceive', 'segmentedBoth')
if _debug: SSM._debug(" - segmented response accepted: %r", segAPDU.apduSA)
elif self.segmentAPDU.apduType == ComplexAckPDU.pduType:
if _debug: SSM._debug(" - complex ack context")
segAPDU = ComplexAckPDU(self.segmentAPDU.apduService, self.segmentAPDU.apduInvokeID)
else:
raise RuntimeError("invalid APDU type for segmentation context")
# maintain the user data reference
segAPDU.pduUserData = self.segmentAPDU.pduUserData
# make sure the destination is set
segAPDU.pduDestination = self.pdu_address
# segmented message?
if (self.segmentCount != 1):
segAPDU.apduSeg = True
segAPDU.apduMor = (indx < (self.segmentCount - 1)) # more follows
segAPDU.apduSeq = indx % 256 # sequence number
# first segment sends proposed window size, rest get actual
if indx == 0:
if _debug: SSM._debug(" - proposedWindowSize: %r", self.proposedWindowSize)
segAPDU.apduWin = self.proposedWindowSize
else:
if _debug: SSM._debug(" - actualWindowSize: %r", self.actualWindowSize)
segAPDU.apduWin = self.actualWindowSize
else:
segAPDU.apduSeg = False
segAPDU.apduMor = False
# add the content
offset = indx * self.segmentSize
segAPDU.put_data( self.segmentAPDU.pduData[offset:offset+self.segmentSize] )
# success
return segAPDU
def append_segment(self, apdu):
"""This function appends the apdu content to the end of the current
APDU being built. The segmentAPDU is the context."""
if _debug: SSM._debug("append_segment %r", apdu)
# check for no context
if not self.segmentAPDU:
raise RuntimeError("no segmentation context established")
# append the data
self.segmentAPDU.put_data(apdu.pduData)
def in_window(self, seqA, seqB):
if _debug: SSM._debug("in_window %r %r", seqA, seqB)
rslt = ((seqA - seqB + 256) % 256) < self.actualWindowSize
if _debug: SSM._debug(" - rslt: %r", rslt)
return rslt
def fill_window(self, seqNum):
"""This function sends all of the packets necessary to fill
out the segmentation window."""
if _debug: SSM._debug("fill_window %r", seqNum)
if _debug: SSM._debug(" - actualWindowSize: %r", self.actualWindowSize)
for ix in range(self.actualWindowSize):
apdu = self.get_segment(seqNum + ix)
# send the message
self.ssmSAP.request(apdu)
# check for no more follows
if not apdu.apduMor:
self.sentAllSegments = True
break
bacpypes_debugging(SSM)
#
# ClientSSM - Client Segmentation State Machine
#
class ClientSSM(SSM):
def __init__(self, sap, pdu_address):
if _debug: ClientSSM._debug("__init__ %s %r", sap, pdu_address)
SSM.__init__(self, sap, pdu_address)
# initialize the retry count
self.retryCount = 0
# acquire the device info
if self.device_info:
if _debug: ClientSSM._debug(" - acquire device information")
self.ssmSAP.deviceInfoCache.acquire(self.device_info)
def set_state(self, newState, timer=0):
"""This function is called when the client wants to change state."""
if _debug: ClientSSM._debug("set_state %r (%s) timer=%r", newState, SSM.transactionLabels[newState], timer)
# do the regular state change
SSM.set_state(self, newState, timer)
# when completed or aborted, remove tracking
if (newState == COMPLETED) or (newState == ABORTED):
if _debug: ClientSSM._debug(" - remove from active transactions")
self.ssmSAP.clientTransactions.remove(self)
# release the device info
if self.device_info:
if _debug: ClientSSM._debug(" - release device information")
self.ssmSAP.deviceInfoCache.release(self.device_info)
def request(self, apdu):
"""This function is called by client transaction functions when it wants
to send a message to the device."""
if _debug: ClientSSM._debug("request %r", apdu)
# make sure it has a good source and destination
apdu.pduSource = None
apdu.pduDestination = self.pdu_address
# send it via the device
self.ssmSAP.request(apdu)
def indication(self, apdu):
"""This function is called after the device has bound a new transaction
and wants to start the process rolling."""
if _debug: ClientSSM._debug("indication %r", apdu)
# make sure we're getting confirmed requests
if (apdu.apduType != ConfirmedRequestPDU.pduType):
raise RuntimeError("invalid APDU (1)")
# save the request and set the segmentation context
self.set_segmentation_context(apdu)
# if the max apdu length of the server isn't known, assume that it
# is the same size as our own and will be the segment size
if (not self.device_info) or (self.device_info.maxApduLengthAccepted is None):
self.segmentSize = self.maxApduLengthAccepted
# if the max npdu length of the server isn't known, assume that it
# is the same as the max apdu length accepted
elif self.device_info.maxNpduLength is None:
self.segmentSize = self.device_info.maxApduLengthAccepted
# the segment size is the minimum of the size of the largest packet
# that can be delivered to the server and the largest it can accept
else:
self.segmentSize = min(self.device_info.maxNpduLength, self.device_info.maxApduLengthAccepted)
if _debug: ClientSSM._debug(" - segment size: %r", self.segmentSize)
# save the invoke ID
self.invokeID = apdu.apduInvokeID
if _debug: ClientSSM._debug(" - invoke ID: %r", self.invokeID)
# compute the segment count
if not apdu.pduData:
# always at least one segment
self.segmentCount = 1
else:
# split into chunks, maybe need one more
self.segmentCount, more = divmod(len(apdu.pduData), self.segmentSize)
if more:
self.segmentCount += 1
if _debug: ClientSSM._debug(" - segment count: %r", self.segmentCount)
# make sure we support segmented transmit if we need to
if self.segmentCount > 1:
if self.segmentationSupported not in ('segmentedTransmit', 'segmentedBoth'):
if _debug: ClientSSM._debug(" - local device can't send segmented requests")
abort = self.abort(AbortReason.segmentationNotSupported)
self.response(abort)
return
if not self.device_info:
if _debug: ClientSSM._debug(" - no server info for segmentation support")
elif self.device_info.segmentationSupported not in ('segmentedReceive', 'segmentedBoth'):
if _debug: ClientSSM._debug(" - server can't receive segmented requests")
abort = self.abort(AbortReason.segmentationNotSupported)
self.response(abort)
return
# make sure we don't exceed the number of segments in our request
# that the server said it was willing to accept
if not self.device_info:
if _debug: ClientSSM._debug(" - no server info for maximum number of segments")
elif not self.device_info.maxSegmentsAccepted:
if _debug: ClientSSM._debug(" - server doesn't say maximum number of segments")
elif self.segmentCount > self.device_info.maxSegmentsAccepted:
if _debug: ClientSSM._debug(" - server can't receive enough segments")
abort = self.abort(AbortReason.apduTooLong)
self.response(abort)
return
# send out the first segment (or the whole thing)
if self.segmentCount == 1:
# unsegmented
self.sentAllSegments = True
self.retryCount = 0
self.set_state(AWAIT_CONFIRMATION, self.apduTimeout)
else:
# segmented
self.sentAllSegments = False
self.retryCount = 0
self.segmentRetryCount = 0
self.initialSequenceNumber = 0
self.actualWindowSize = None # segment ack will set value
self.set_state(SEGMENTED_REQUEST, self.segmentTimeout)
# deliver to the device
self.request(self.get_segment(0))
def response(self, apdu):
"""This function is called by client transaction functions when they want
to send a message to the application."""
if _debug: ClientSSM._debug("response %r", apdu)
# make sure it has a good source and destination
apdu.pduSource = self.pdu_address
apdu.pduDestination = None
# send it to the application
self.ssmSAP.sap_response(apdu)
def confirmation(self, apdu):
"""This function is called by the device for all upstream messages related
to the transaction."""
if _debug: ClientSSM._debug("confirmation %r", apdu)
if self.state == SEGMENTED_REQUEST:
self.segmented_request(apdu)
elif self.state == AWAIT_CONFIRMATION:
self.await_confirmation(apdu)
elif self.state == SEGMENTED_CONFIRMATION:
self.segmented_confirmation(apdu)
else:
raise RuntimeError("invalid state")
def process_task(self):
"""This function is called when something has taken too long."""
if _debug: ClientSSM._debug("process_task")
if self.state == SEGMENTED_REQUEST:
self.segmented_request_timeout()
elif self.state == AWAIT_CONFIRMATION:
self.await_confirmation_timeout()
elif self.state == SEGMENTED_CONFIRMATION:
self.segmented_confirmation_timeout()
elif self.state == COMPLETED:
pass
elif self.state == ABORTED:
pass
else:
e = RuntimeError("invalid state")
ClientSSM._exception("exception: %r", e)
raise e
def abort(self, reason):
"""This function is called when the transaction should be aborted."""
if _debug: ClientSSM._debug("abort %r", reason)
# change the state to aborted
self.set_state(ABORTED)
# build an abort PDU to return
abort_pdu = AbortPDU(False, self.invokeID, reason)
# return it
return abort_pdu
def segmented_request(self, apdu):
"""This function is called when the client is sending a segmented request
and receives an apdu."""
if _debug: ClientSSM._debug("segmented_request %r", apdu)
# server is ready for the next segment
if apdu.apduType == SegmentAckPDU.pduType:
if _debug: ClientSSM._debug(" - segment ack")
# actual window size is provided by server
self.actualWindowSize = apdu.apduWin
# duplicate ack received?
if not self.in_window(apdu.apduSeq, self.initialSequenceNumber):
if _debug: ClientSSM._debug(" - not in window")
self.restart_timer(self.segmentTimeout)
# final ack received?
elif self.sentAllSegments:
if _debug: ClientSSM._debug(" - all done sending request")
self.set_state(AWAIT_CONFIRMATION, self.apduTimeout)
# more segments to send
else:
if _debug: ClientSSM._debug(" - more segments to send")
self.initialSequenceNumber = (apdu.apduSeq + 1) % 256
self.segmentRetryCount = 0
self.fill_window(self.initialSequenceNumber)
self.restart_timer(self.segmentTimeout)
# simple ack
elif (apdu.apduType == SimpleAckPDU.pduType):
if _debug: ClientSSM._debug(" - simple ack")
if not self.sentAllSegments:
abort = self.abort(AbortReason.invalidApduInThisState)
self.request(abort) # send it to the device
self.response(abort) # send it to the application
else:
self.set_state(COMPLETED)
self.response(apdu)
elif (apdu.apduType == ComplexAckPDU.pduType):
if _debug: ClientSSM._debug(" - complex ack")
if not self.sentAllSegments:
abort = self.abort(AbortReason.invalidApduInThisState)
self.request(abort) # send it to the device
self.response(abort) # send it to the application
elif not apdu.apduSeg:
# ack is not segmented
self.set_state(COMPLETED)
self.response(apdu)
else:
# set the segmented response context
self.set_segmentation_context(apdu)
# minimum of what the server is proposing and this client proposes
self.actualWindowSize = min(apdu.apduWin, self.proposedWindowSize)
self.lastSequenceNumber = 0
self.initialSequenceNumber = 0
self.set_state(SEGMENTED_CONFIRMATION, self.segmentTimeout)
# some kind of problem
elif (apdu.apduType == ErrorPDU.pduType) or (apdu.apduType == RejectPDU.pduType) or (apdu.apduType == AbortPDU.pduType):
if _debug: ClientSSM._debug(" - error/reject/abort")
self.set_state(COMPLETED)
self.response(apdu)
else:
raise RuntimeError("invalid APDU (2)")
def segmented_request_timeout(self):
if _debug: ClientSSM._debug("segmented_request_timeout")
# try again
if self.segmentRetryCount < self.numberOfApduRetries:
if _debug: ClientSSM._debug(" - retry segmented request")
self.segmentRetryCount += 1
self.start_timer(self.segmentTimeout)
self.fill_window(self.initialSequenceNumber)
else:
if _debug: ClientSSM._debug(" - abort, no response from the device")
abort = self.abort(AbortReason.noResponse)
self.response(abort)
def await_confirmation(self, apdu):
if _debug: ClientSSM._debug("await_confirmation %r", apdu)
if (apdu.apduType == AbortPDU.pduType):
if _debug: ClientSSM._debug(" - server aborted")
self.set_state(ABORTED)
self.response(apdu)
elif (apdu.apduType == SimpleAckPDU.pduType) or (apdu.apduType == ErrorPDU.pduType) or (apdu.apduType == RejectPDU.pduType):
if _debug: ClientSSM._debug(" - simple ack, error, or reject")
self.set_state(COMPLETED)
self.response(apdu)
elif (apdu.apduType == ComplexAckPDU.pduType):
if _debug: ClientSSM._debug(" - complex ack")
# if the response is not segmented, we're done
if not apdu.apduSeg:
if _debug: ClientSSM._debug(" - unsegmented")
self.set_state(COMPLETED)
self.response(apdu)
elif self.segmentationSupported not in ('segmentedReceive', 'segmentedBoth'):
if _debug: ClientSSM._debug(" - local device can't receive segmented messages")
abort = self.abort(AbortReason.segmentationNotSupported)
self.response(abort)
elif apdu.apduSeq == 0:
if _debug: ClientSSM._debug(" - segmented response")
# set the segmented response context
self.set_segmentation_context(apdu)
self.actualWindowSize = apdu.apduWin
self.lastSequenceNumber = 0
self.initialSequenceNumber = 0
self.set_state(SEGMENTED_CONFIRMATION, self.segmentTimeout)
# send back a segment ack
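# the positional arguments appear to be (nak, srv, invokeID, sequenceNumber, windowSize): nak=0 marks a normal (not negative) ack and srv=0 marks it as sent by the client side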
segack = SegmentAckPDU( 0, 0, self.invokeID, self.initialSequenceNumber, self.actualWindowSize )
self.request(segack)
else:
if _debug: ClientSSM._debug(" - invalid APDU in this state")
abort = self.abort(AbortReason.invalidApduInThisState)
self.request(abort) # send it to the device
self.response(abort) # send it to the application
elif (apdu.apduType == SegmentAckPDU.pduType):
if _debug: ClientSSM._debug(" - segment ack(!?)")
self.restart_timer(self.segmentTimeout)
else:
raise RuntimeError("invalid APDU (3)")
def await_confirmation_timeout(self):
if _debug: ClientSSM._debug("await_confirmation_timeout")
if self.retryCount < self.numberOfApduRetries:
if _debug: ClientSSM._debug(" - no response, try again (%d < %d)", self.retryCount, self.numberOfApduRetries)
self.retryCount += 1
# save the retry count, indication acts like the request is coming
# from the application so the retryCount gets re-initialized.
saveCount = self.retryCount
self.indication(self.segmentAPDU)
self.retryCount = saveCount
else:
if _debug: ClientSSM._debug(" - retry count exceeded")
abort = self.abort(AbortReason.noResponse)
self.response(abort)
def segmented_confirmation(self, apdu):
if _debug: ClientSSM._debug("segmented_confirmation %r", apdu)
# the only messages we should be getting are complex acks
if (apdu.apduType != ComplexAckPDU.pduType):
if _debug: ClientSSM._debug(" - complex ack required")
abort = self.abort(AbortReason.invalidApduInThisState)
self.request(abort) # send it to the device
self.response(abort) # send it to the application
return
# it must be segmented
if not apdu.apduSeg:
if _debug: ClientSSM._debug(" - must be segmented")
abort = self.abort(AbortReason.invalidApduInThisState)
self.request(abort) # send it to the device
self.response(abort) # send it to the application
return
# proper segment number
if apdu.apduSeq != (self.lastSequenceNumber + 1) % 256:
if _debug: ClientSSM._debug(" - segment %s received out of order, should be %s", apdu.apduSeq, (self.lastSequenceNumber + 1) % 256)
# segment received out of order
self.restart_timer(self.segmentTimeout)
segack = SegmentAckPDU(1, 0, self.invokeID, self.lastSequenceNumber, self.actualWindowSize)
self.request(segack)
return
# add the data
self.append_segment(apdu)
# update the sequence number
self.lastSequenceNumber = (self.lastSequenceNumber + 1) % 256
# last segment received
if not apdu.apduMor:
if _debug: ClientSSM._debug(" - no more follows")
# send a final ack
segack = SegmentAckPDU(0, 0, self.invokeID, self.lastSequenceNumber, self.actualWindowSize)
self.request(segack)
self.set_state(COMPLETED)
self.response(self.segmentAPDU)
elif apdu.apduSeq == ((self.initialSequenceNumber + self.actualWindowSize) % 256):
if _debug: ClientSSM._debug(" - last segment in the group")
self.initialSequenceNumber = self.lastSequenceNumber
self.restart_timer(self.segmentTimeout)
segack = SegmentAckPDU(0, 0, self.invokeID, self.lastSequenceNumber, self.actualWindowSize)
self.request(segack)
else:
# wait for more segments
if _debug: ClientSSM._debug(" - wait for more segments")
self.restart_timer(self.segmentTimeout)
def segmented_confirmation_timeout(self):
if _debug: ClientSSM._debug("segmented_confirmation_timeout")
abort = self.abort(AbortReason.noResponse)
self.response(abort)
bacpypes_debugging(ClientSSM)
#
# ServerSSM - Server Segmentation State Machine
#
class ServerSSM(SSM):
def __init__(self, sap, pdu_address):
if _debug: ServerSSM._debug("__init__ %s %r", sap, pdu_address)
SSM.__init__(self, sap, pdu_address)
# acquire the device info
if self.device_info:
if _debug: ServerSSM._debug(" - acquire device information")
self.ssmSAP.deviceInfoCache.acquire(self.device_info)
def set_state(self, newState, timer=0):
"""This function is called when the client wants to change state."""
if _debug: ServerSSM._debug("set_state %r (%s) timer=%r", newState, SSM.transactionLabels[newState], timer)
# do the regular state change
SSM.set_state(self, newState, timer)
# when completed or aborted, remove tracking
if (newState == COMPLETED) or (newState == ABORTED):
if _debug: ServerSSM._debug(" - remove from active transactions")
self.ssmSAP.serverTransactions.remove(self)
# release the device info
if self.device_info:
if _debug: ClientSSM._debug(" - release device information")
self.ssmSAP.deviceInfoCache.release(self.device_info)
def request(self, apdu):
"""This function is called by transaction functions to send
to the application."""
if _debug: ServerSSM._debug("request %r", apdu)
# make sure it has a good source and destination
apdu.pduSource = self.pdu_address
apdu.pduDestination = None
# send it via the device
self.ssmSAP.sap_request(apdu)
def indication(self, apdu):
"""This function is called for each downstream packet related to
the transaction."""
if _debug: ServerSSM._debug("indication %r", apdu)
if self.state == IDLE:
self.idle(apdu)
elif self.state == SEGMENTED_REQUEST:
self.segmented_request(apdu)
elif self.state == AWAIT_RESPONSE:
self.await_response(apdu)
elif self.state == SEGMENTED_RESPONSE:
self.segmented_response(apdu)
else:
if _debug: ServerSSM._debug(" - invalid state")
def response(self, apdu):
"""This function is called by transaction functions when they want
to send a message to the device."""
if _debug: ServerSSM._debug("response %r", apdu)
# make sure it has a good source and destination
apdu.pduSource = None
apdu.pduDestination = self.pdu_address
# send it via the device
self.ssmSAP.request(apdu)
def confirmation(self, apdu):
"""This function is called when the application has provided a response
and needs it to be sent to the client."""
if _debug: ServerSSM._debug("confirmation %r", apdu)
# check to see we are in the correct state
if self.state != AWAIT_RESPONSE:
if _debug: ServerSSM._debug(" - warning: not expecting a response")
# abort response
if (apdu.apduType == AbortPDU.pduType):
if _debug: ServerSSM._debug(" - abort")
self.set_state(ABORTED)
# send the response to the device
self.response(apdu)
return
# simple response
if (apdu.apduType == SimpleAckPDU.pduType) or (apdu.apduType == ErrorPDU.pduType) or (apdu.apduType == RejectPDU.pduType):
if _debug: ServerSSM._debug(" - simple ack, error, or reject")
# transaction completed
self.set_state(COMPLETED)
# send the response to the device
self.response(apdu)
return
# complex ack
if (apdu.apduType == ComplexAckPDU.pduType):
if _debug: ServerSSM._debug(" - complex ack")
# save the response and set the segmentation context
self.set_segmentation_context(apdu)
# the segment size is the minimum of the size of the largest packet
# that can be delivered to the client and the largest it can accept
if (not self.device_info) or (self.device_info.maxNpduLength is None):
self.segmentSize = self.maxApduLengthAccepted
else:
self.segmentSize = min(self.device_info.maxNpduLength, self.maxApduLengthAccepted)
if _debug: ServerSSM._debug(" - segment size: %r", self.segmentSize)
# compute the segment count
if not apdu.pduData:
# always at least one segment
self.segmentCount = 1
else:
# split into chunks, maybe need one more
self.segmentCount, more = divmod(len(apdu.pduData), self.segmentSize)
if more:
self.segmentCount += 1
if _debug: ServerSSM._debug(" - segment count: %r", self.segmentCount)
# make sure we support segmented transmit if we need to
if self.segmentCount > 1:
if _debug: ServerSSM._debug(" - segmentation required, %d segments", self.segmentCount)
# make sure we support segmented transmit
if self.segmentationSupported not in ('segmentedTransmit', 'segmentedBoth'):
if _debug: ServerSSM._debug(" - server can't send segmented responses")
abort = self.abort(AbortReason.segmentationNotSupported)
self.response(abort)
return
# make sure client supports segmented receive
if self.device_info and self.device_info.segmentationSupported not in ('segmentedReceive', 'segmentedBoth'):  # no cached client info: assume it can receive segments
if _debug: ServerSSM._debug(" - client can't receive segmented responses")
abort = self.abort(AbortReason.segmentationNotSupported)
self.response(abort)
return
# make sure we don't exceed the number of segments in our response
# that the device said it was willing to accept in the request
if self.segmentCount > self.maxSegmentsAccepted:
if _debug: ServerSSM._debug(" - client can't receive enough segments")
abort = self.abort(AbortReason.apduTooLong)
self.response(abort)
return
# initialize the state
self.segmentRetryCount = 0
self.initialSequenceNumber = 0
self.actualWindowSize = None
# send out the first segment (or the whole thing)
if self.segmentCount == 1:
self.response(apdu)
self.set_state(COMPLETED)
else:
self.response(self.get_segment(0))
self.set_state(SEGMENTED_RESPONSE, self.segmentTimeout)
else:
raise RuntimeError("invalid APDU (4)")
def process_task(self):
"""This function is called when the client has failed to send all of the
segments of a segmented request, the application has taken too long to
complete the request, or the client failed to ack the segments of a
segmented response."""
if _debug: ServerSSM._debug("process_task")
if self.state == SEGMENTED_REQUEST:
self.segmented_request_timeout()
elif self.state == AWAIT_RESPONSE:
self.await_response_timeout()
elif self.state == SEGMENTED_RESPONSE:
self.segmented_response_timeout()
elif self.state == COMPLETED:
pass
elif self.state == ABORTED:
pass
else:
if _debug: ServerSSM._debug("invalid state")
raise RuntimeError("invalid state")
def abort(self, reason):
"""This function is called when the application would like to abort the
transaction. There is no notification back to the application."""
if _debug: ServerSSM._debug("abort %r", reason)
# change the state to aborted
self.set_state(ABORTED)
# return an abort APDU
return AbortPDU(True, self.invokeID, reason)
def idle(self, apdu):
if _debug: ServerSSM._debug("idle %r", apdu)
# make sure we're getting confirmed requests
if not isinstance(apdu, ConfirmedRequestPDU):
raise RuntimeError("invalid APDU (5)")
# save the invoke ID
self.invokeID = apdu.apduInvokeID
if _debug: ServerSSM._debug(" - invoke ID: %r", self.invokeID)
if apdu.apduSA:
if not self.device_info:
if _debug: ServerSSM._debug(" - no client device info")
elif self.device_info.segmentationSupported == 'noSegmentation':
if _debug: ServerSSM._debug(" - client actually supports segmented receive")
self.device_info.segmentationSupported = 'segmentedReceive'
if _debug: ServerSSM._debug(" - tell the cache the info has been updated")
self.ssmSAP.deviceInfoCache.update_device_info(self.device_info)
elif self.device_info.segmentationSupported == 'segmentedTransmit':
if _debug: ServerSSM._debug(" - client actually supports both segmented transmit and receive")
self.device_info.segmentationSupported = 'segmentedBoth'
if _debug: ServerSSM._debug(" - tell the cache the info has been updated")
self.ssmSAP.deviceInfoCache.update_device_info(self.device_info)
elif self.device_info.segmentationSupported == 'segmentedReceive':
pass
elif self.device_info.segmentationSupported == 'segmentedBoth':
pass
else:
raise RuntimeError("invalid segmentation supported in device info")
# decode the maximum that the client can receive in one APDU, and if
# there is a value in the device information then use that one because
# it came from reading device object property value or from an I-Am
# message that was received
self.maxApduLengthAccepted = decode_max_apdu_length_accepted(apdu.apduMaxResp)
if self.device_info and self.device_info.maxApduLengthAccepted is not None:
if self.device_info.maxApduLengthAccepted < self.maxApduLengthAccepted:
if _debug: ServerSSM._debug(" - apduMaxResp encoding error")
else:
self.maxApduLengthAccepted = self.device_info.maxApduLengthAccepted
if _debug: ServerSSM._debug(" - maxApduLengthAccepted: %r", self.maxApduLengthAccepted)
# save the number of segments the client is willing to accept in the ack,
# if this is None then the value is unknown or more than 64
self.maxSegmentsAccepted = decode_max_segments_accepted(apdu.apduMaxSegs)
# unsegmented request
if not apdu.apduSeg:
self.set_state(AWAIT_RESPONSE, self.ssmSAP.applicationTimeout)
self.request(apdu)
return
# make sure we support segmented requests
if self.segmentationSupported not in ('segmentedReceive', 'segmentedBoth'):
abort = self.abort(AbortReason.segmentationNotSupported)
self.response(abort)
return
# save the request and set the segmentation context
self.set_segmentation_context(apdu)
# the window size is the minimum of what I would propose and what the
# device has proposed
self.actualWindowSize = min(apdu.apduWin, self.proposedWindowSize)
if _debug: ServerSSM._debug(" - actualWindowSize? min(%r, %r) -> %r", apdu.apduWin, self.proposedWindowSize, self.actualWindowSize)
# initialize the state
self.lastSequenceNumber = 0
self.initialSequenceNumber = 0
self.set_state(SEGMENTED_REQUEST, self.segmentTimeout)
# send back a segment ack
segack = SegmentAckPDU(0, 1, self.invokeID, self.initialSequenceNumber, self.actualWindowSize)
if _debug: ServerSSM._debug(" - segAck: %r", segack)
self.response(segack)
def segmented_request(self, apdu):
if _debug: ServerSSM._debug("segmented_request %r", apdu)
# some kind of problem
if (apdu.apduType == AbortPDU.pduType):
self.set_state(COMPLETED)
self.response(apdu)
return
# the only messages we should be getting are confirmed requests
elif (apdu.apduType != ConfirmedRequestPDU.pduType):
abort = self.abort(AbortReason.invalidApduInThisState)
self.request(abort) # send it to the application
self.response(abort) # send it to the device
return
# it must be segmented
elif not apdu.apduSeg:
abort = self.abort(AbortReason.invalidApduInThisState)
self.request(abort) # send it to the application
self.response(abort) # send it to the device
return
# proper segment number
if apdu.apduSeq != (self.lastSequenceNumber + 1) % 256:
if _debug: ServerSSM._debug(" - segment %d received out of order, should be %d", apdu.apduSeq, (self.lastSequenceNumber + 1) % 256)
# segment received out of order
self.restart_timer(self.segmentTimeout)
# send back a segment ack
segack = SegmentAckPDU(1, 1, self.invokeID, self.initialSequenceNumber, self.actualWindowSize)
self.response(segack)
return
# add the data
self.append_segment(apdu)
# update the sequence number
self.lastSequenceNumber = (self.lastSequenceNumber + 1) % 256
# last segment?
if not apdu.apduMor:
if _debug: ServerSSM._debug(" - no more follows")
# send back a final segment ack
segack = SegmentAckPDU(0, 1, self.invokeID, self.lastSequenceNumber, self.actualWindowSize)
self.response(segack)
# forward the whole thing to the application
self.set_state(AWAIT_RESPONSE, self.ssmSAP.applicationTimeout)
self.request(self.segmentAPDU)
elif apdu.apduSeq == ((self.initialSequenceNumber + self.actualWindowSize) % 256):
if _debug: ServerSSM._debug(" - last segment in the group")
self.initialSequenceNumber = self.lastSequenceNumber
self.restart_timer(self.segmentTimeout)
# send back a segment ack
segack = SegmentAckPDU(0, 1, self.invokeID, self.initialSequenceNumber, self.actualWindowSize)
self.response(segack)
else:
# wait for more segments
if _debug: ServerSSM._debug(" - wait for more segments")
self.restart_timer(self.segmentTimeout)
def segmented_request_timeout(self):
if _debug: ServerSSM._debug("segmented_request_timeout")
# give up
self.set_state(ABORTED)
def await_response(self, apdu):
if _debug: ServerSSM._debug("await_response %r", apdu)
if isinstance(apdu, ConfirmedRequestPDU):
if _debug: ServerSSM._debug(" - client is trying this request again")
elif isinstance(apdu, AbortPDU):
if _debug: ServerSSM._debug(" - client aborting this request")
# forward abort to the application
self.set_state(ABORTED)
self.request(apdu)
else:
raise RuntimeError("invalid APDU (6)")
def await_response_timeout(self):
"""This function is called when the application has taken too long
to respond to a client's request. The client has probably long since
given up."""
if _debug: ServerSSM._debug("await_response_timeout")
abort = self.abort(AbortReason.serverTimeout)
self.request(abort)
def segmented_response(self, apdu):
if _debug: ServerSSM._debug("segmented_response %r", apdu)
# client is ready for the next segment
if (apdu.apduType == SegmentAckPDU.pduType):
if _debug: ServerSSM._debug(" - segment ack")
# actual window size is provided by client
self.actualWindowSize = apdu.apduWin
# duplicate ack received?
if not self.in_window(apdu.apduSeq, self.initialSequenceNumber):
if _debug: ServerSSM._debug(" - not in window")
self.restart_timer(self.segmentTimeout)
# final ack received?
elif self.sentAllSegments:
if _debug: ServerSSM._debug(" - all done sending response")
self.set_state(COMPLETED)
else:
if _debug: ServerSSM._debug(" - more segments to send")
self.initialSequenceNumber = (apdu.apduSeq + 1) % 256
self.actualWindowSize = apdu.apduWin
self.segmentRetryCount = 0
self.fill_window(self.initialSequenceNumber)
self.restart_timer(self.segmentTimeout)
# some kind of problem
elif (apdu.apduType == AbortPDU.pduType):
self.set_state(COMPLETED)
self.response(apdu)
else:
raise RuntimeError("invalid APDU (7)")
def segmented_response_timeout(self):
if _debug: ServerSSM._debug("segmented_response_timeout")
# try again
if self.segmentRetryCount < self.numberOfApduRetries:
self.segmentRetryCount += 1
self.start_timer(self.segmentTimeout)
self.fill_window(self.initialSequenceNumber)
else:
# give up
self.set_state(ABORTED)
bacpypes_debugging(ServerSSM)
#
# StateMachineAccessPoint
#
class StateMachineAccessPoint(Client, ServiceAccessPoint):
def __init__(self, localDevice=None, deviceInfoCache=None, sap=None, cid=None):
if _debug: StateMachineAccessPoint._debug("__init__ localDevice=%r deviceInfoCache=%r sap=%r cid=%r", localDevice, deviceInfoCache, sap, cid)
# basic initialization
Client.__init__(self, cid)
ServiceAccessPoint.__init__(self, sap)
# save a reference to the device information cache
self.localDevice = localDevice
self.deviceInfoCache = deviceInfoCache
# client settings
self.nextInvokeID = 1
self.clientTransactions = []
# server settings
self.serverTransactions = []
# confirmed request defaults
self.numberOfApduRetries = 3
self.apduTimeout = 3000
self.maxApduLengthAccepted = 1024
# segmentation defaults
self.segmentationSupported = 'noSegmentation'
self.segmentTimeout = 1500
self.maxSegmentsAccepted = 2
self.proposedWindowSize = 2
# device communication control
self.dccEnableDisable = 'enable'
# how long the state machine is willing to wait for the application
# layer to form a response and send it
self.applicationTimeout = 3000
def get_next_invoke_id(self, addr):
"""Called by clients to get an unused invoke ID."""
if _debug: StateMachineAccessPoint._debug("get_next_invoke_id")
initialID = self.nextInvokeID
while 1:
invokeID = self.nextInvokeID
self.nextInvokeID = (self.nextInvokeID + 1) % 256
# see if we've checked for them all
if initialID == self.nextInvokeID:
raise RuntimeError("no available invoke ID")
for tr in self.clientTransactions:
if (invokeID == tr.invokeID) and (addr == tr.pdu_address):
break
else:
break
return invokeID
def confirmation(self, pdu):
"""Packets coming up the stack are APDU's."""
if _debug: StateMachineAccessPoint._debug("confirmation %r", pdu)
# check device communication control
if self.dccEnableDisable == 'enable':
if _debug: StateMachineAccessPoint._debug(" - communications enabled")
elif self.dccEnableDisable == 'disable':
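# while communications are disabled, only DeviceCommunicationControl (confirmed service 17), ReinitializeDevice (confirmed service 20) and Who-Is (unconfirmed service 8) are still processed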
if (pdu.apduType == 0) and (pdu.apduService == 17):
if _debug: StateMachineAccessPoint._debug(" - continue with DCC request")
elif (pdu.apduType == 0) and (pdu.apduService == 20):
if _debug: StateMachineAccessPoint._debug(" - continue with reinitialize device")
elif (pdu.apduType == 1) and (pdu.apduService == 8):
if _debug: StateMachineAccessPoint._debug(" - continue with Who-Is")
else:
if _debug: StateMachineAccessPoint._debug(" - not a Who-Is, dropped")
return
elif self.dccEnableDisable == 'disableInitiation':
if _debug: StateMachineAccessPoint._debug(" - initiation disabled")
# make a more focused interpretation
atype = apdu_types.get(pdu.apduType)
if not atype:
StateMachineAccessPoint._warning(" - unknown apduType: %r", pdu.apduType)
return
# decode it
apdu = atype()
apdu.decode(pdu)
if _debug: StateMachineAccessPoint._debug(" - apdu: %r", apdu)
if isinstance(apdu, ConfirmedRequestPDU):
# find duplicates of this request
for tr in self.serverTransactions:
if (apdu.apduInvokeID == tr.invokeID) and (apdu.pduSource == tr.pdu_address):
break
else:
# build a server transaction
tr = ServerSSM(self, apdu.pduSource)
# add it to our transactions to track it
self.serverTransactions.append(tr)
# let it run with the apdu
tr.indication(apdu)
elif isinstance(apdu, UnconfirmedRequestPDU):
# deliver directly to the application
self.sap_request(apdu)
elif isinstance(apdu, SimpleAckPDU) \
or isinstance(apdu, ComplexAckPDU) \
or isinstance(apdu, ErrorPDU) \
or isinstance(apdu, RejectPDU):
# find the client transaction this is acking
for tr in self.clientTransactions:
if (apdu.apduInvokeID == tr.invokeID) and (apdu.pduSource == tr.pdu_address):
break
else:
return
# send the packet on to the transaction
tr.confirmation(apdu)
elif isinstance(apdu, AbortPDU):
# find the transaction being aborted
if apdu.apduSrv:
for tr in self.clientTransactions:
if (apdu.apduInvokeID == tr.invokeID) and (apdu.pduSource == tr.pdu_address):
break
else:
return
# send the packet on to the transaction
tr.confirmation(apdu)
else:
for tr in self.serverTransactions:
if (apdu.apduInvokeID == tr.invokeID) and (apdu.pduSource == tr.pdu_address):
break
else:
return
# send the packet on to the transaction
tr.indication(apdu)
elif isinstance(apdu, SegmentAckPDU):
# find the transaction this segment ack belongs to
if apdu.apduSrv:
for tr in self.clientTransactions:
if (apdu.apduInvokeID == tr.invokeID) and (apdu.pduSource == tr.pdu_address):
break
else:
return
# send the packet on to the transaction
tr.confirmation(apdu)
else:
for tr in self.serverTransactions:
if (apdu.apduInvokeID == tr.invokeID) and (apdu.pduSource == tr.pdu_address):
break
else:
return
# send the packet on to the transaction
tr.indication(apdu)
else:
raise RuntimeError("invalid APDU (8)")
def sap_indication(self, apdu):
"""This function is called when the application is requesting
a new transaction as a client."""
if _debug: StateMachineAccessPoint._debug("sap_indication %r", apdu)
# check device communication control
if self.dccEnableDisable == 'enable':
if _debug: StateMachineAccessPoint._debug(" - communications enabled")
elif self.dccEnableDisable == 'disable':
if _debug: StateMachineAccessPoint._debug(" - communications disabled")
return
elif self.dccEnableDisable == 'disableInitiation':
if _debug: StateMachineAccessPoint._debug(" - initiation disabled")
if (apdu.apduType == 1) and (apdu.apduService == 0):
if _debug: StateMachineAccessPoint._debug(" - continue with I-Am")
else:
if _debug: StateMachineAccessPoint._debug(" - not an I-Am")
return
if isinstance(apdu, UnconfirmedRequestPDU):
# deliver to the device
self.request(apdu)
elif isinstance(apdu, ConfirmedRequestPDU):
# make sure it has an invoke ID
if apdu.apduInvokeID is None:
apdu.apduInvokeID = self.get_next_invoke_id(apdu.pduDestination)
else:
# verify the invoke ID isn't already being used
for tr in self.clientTransactions:
if (apdu.apduInvokeID == tr.invokeID) and (apdu.pduDestination == tr.pdu_address):
raise RuntimeError("invoke ID in use")
# warning for bogus requests
if (apdu.pduDestination.addrType != Address.localStationAddr) and (apdu.pduDestination.addrType != Address.remoteStationAddr):
StateMachineAccessPoint._warning("%s is not a local or remote station", apdu.pduDestination)
# create a client transaction state machine
tr = ClientSSM(self, apdu.pduDestination)
if _debug: StateMachineAccessPoint._debug(" - client segmentation state machine: %r", tr)
# add it to our transactions to track it
self.clientTransactions.append(tr)
# let it run
tr.indication(apdu)
else:
raise RuntimeError("invalid APDU (9)")
def sap_confirmation(self, apdu):
"""This function is called when the application is responding
to a request, the apdu may be a simple ack, complex ack, error, reject or abort."""
if _debug: StateMachineAccessPoint._debug("sap_confirmation %r", apdu)
if isinstance(apdu, SimpleAckPDU) \
or isinstance(apdu, ComplexAckPDU) \
or isinstance(apdu, ErrorPDU) \
or isinstance(apdu, RejectPDU) \
or isinstance(apdu, AbortPDU):
# find the appropriate server transaction
for tr in self.serverTransactions:
if (apdu.apduInvokeID == tr.invokeID) and (apdu.pduDestination == tr.pdu_address):
break
else:
return
# pass control to the transaction
tr.confirmation(apdu)
else:
raise RuntimeError("invalid APDU (10)")
bacpypes_debugging(StateMachineAccessPoint)
#
# ApplicationServiceAccessPoint
#
class ApplicationServiceAccessPoint(ApplicationServiceElement, ServiceAccessPoint):
def __init__(self, aseID=None, sapID=None):
if _debug: ApplicationServiceAccessPoint._debug("__init__ aseID=%r sapID=%r", aseID, sapID)
ApplicationServiceElement.__init__(self, aseID)
ServiceAccessPoint.__init__(self, sapID)
def indication(self, apdu):
if _debug: ApplicationServiceAccessPoint._debug("indication %r", apdu)
if isinstance(apdu, ConfirmedRequestPDU):
# assume no errors found
error_found = None
# look up the class associated with the service
atype = confirmed_request_types.get(apdu.apduService)
if not atype:
if _debug: ApplicationServiceAccessPoint._debug(" - no confirmed request decoder")
error_found = UnrecognizedService()
# no error so far, keep going
if not error_found:
try:
xpdu = atype()
xpdu.decode(apdu)
except RejectException, err:
ApplicationServiceAccessPoint._debug(" - decoding reject: %r", err)
error_found = err
except AbortException, err:
ApplicationServiceAccessPoint._debug(" - decoding abort: %r", err)
error_found = err
# no error so far, keep going
if not error_found:
if _debug: ApplicationServiceAccessPoint._debug(" - no decoding error")
try:
# forward the decoded packet
self.sap_request(xpdu)
except RejectException, err:
ApplicationServiceAccessPoint._debug(" - execution reject: %r", err)
error_found = err
except AbortException, err:
ApplicationServiceAccessPoint._debug(" - execution abort: %r", err)
error_found = err
# if there was an error, send it back to the client
if isinstance(error_found, RejectException):
if _debug: ApplicationServiceAccessPoint._debug(" - reject exception: %r", error_found)
reject_pdu = RejectPDU(reason=error_found.rejectReason)
reject_pdu.set_context(apdu)
if _debug: ApplicationServiceAccessPoint._debug(" - reject_pdu: %r", reject_pdu)
# send it to the client
self.response(reject_pdu)
elif isinstance(error_found, AbortException):
if _debug: ApplicationServiceAccessPoint._debug(" - abort exception: %r", error_found)
abort_pdu = AbortPDU(reason=error_found.abortReason)
abort_pdu.set_context(apdu)
if _debug: ApplicationServiceAccessPoint._debug(" - abort_pdu: %r", abort_pdu)
# send it to the client
self.response(abort_pdu)
elif isinstance(apdu, UnconfirmedRequestPDU):
atype = unconfirmed_request_types.get(apdu.apduService)
if not atype:
if _debug: ApplicationServiceAccessPoint._debug(" - no unconfirmed request decoder")
return
try:
xpdu = atype()
xpdu.decode(apdu)
except RejectException, err:
ApplicationServiceAccessPoint._debug(" - decoding reject: %r", err)
return
except AbortException, err:
ApplicationServiceAccessPoint._debug(" - decoding abort: %r", err)
return
try:
# forward the decoded packet
self.sap_request(xpdu)
except RejectException, err:
ApplicationServiceAccessPoint._debug(" - execution reject: %r", err)
except AbortException, err:
ApplicationServiceAccessPoint._debug(" - execution abort: %r", err)
else:
if _debug: ApplicationServiceAccessPoint._debug(" - unknown PDU type?!")
def sap_indication(self, apdu):
if _debug: ApplicationServiceAccessPoint._debug("sap_indication %r", apdu)
if isinstance(apdu, ConfirmedRequestPDU):
try:
xpdu = ConfirmedRequestPDU()
apdu.encode(xpdu)
apdu._xpdu = xpdu
except Exception, err:
ApplicationServiceAccessPoint._exception("confirmed request encoding error: %r", err)
return
elif isinstance(apdu, UnconfirmedRequestPDU):
try:
xpdu = UnconfirmedRequestPDU()
apdu.encode(xpdu)
apdu._xpdu = xpdu
except Exception, err:
ApplicationServiceAccessPoint._exception("unconfirmed request encoding error: %r", err)
return
else:
if _debug: ApplicationServiceAccessPoint._debug(" - unknown PDU type?!")
return
if _debug: ApplicationServiceAccessPoint._debug(" - xpdu %r", xpdu)
# forward the encoded packet
self.request(xpdu)
# if the upper layers of the application did not assign an invoke ID,
# copy the one that was assigned on its way down the stack
if isinstance(apdu, ConfirmedRequestPDU) and apdu.apduInvokeID is None:
if _debug: ApplicationServiceAccessPoint._debug(" - pass invoke ID upstream %r", xpdu.apduInvokeID)
apdu.apduInvokeID = xpdu.apduInvokeID
def confirmation(self, apdu):
if _debug: ApplicationServiceAccessPoint._debug("confirmation %r", apdu)
if isinstance(apdu, SimpleAckPDU):
xpdu = apdu
elif isinstance(apdu, ComplexAckPDU):
atype = complex_ack_types.get(apdu.apduService)
if not atype:
if _debug: ApplicationServiceAccessPoint._debug(" - no complex ack decoder")
return
try:
xpdu = atype()
xpdu.decode(apdu)
except Exception, err:
ApplicationServiceAccessPoint._exception("complex ack decoding error: %r", err)
return
elif isinstance(apdu, ErrorPDU):
atype = error_types.get(apdu.apduService)
if not atype:
if _debug: ApplicationServiceAccessPoint._debug(" - no special error decoder")
atype = Error
try:
xpdu = atype()
xpdu.decode(apdu)
except Exception, err:
ApplicationServiceAccessPoint._exception("error PDU decoding error: %r", err)
xpdu = Error(errorClass=0, errorCode=0)
elif isinstance(apdu, RejectPDU):
xpdu = apdu
elif isinstance(apdu, AbortPDU):
xpdu = apdu
else:
if _debug: ApplicationServiceAccessPoint._debug(" - unknown PDU type")
return
if _debug: ApplicationServiceAccessPoint._debug(" - xpdu %r", xpdu)
# forward the decoded packet
self.sap_response(xpdu)
def sap_confirmation(self, apdu):
if _debug: ApplicationServiceAccessPoint._debug("sap_confirmation %r", apdu)
if isinstance(apdu, SimpleAckPDU):
xpdu = apdu
elif isinstance(apdu, ComplexAckPDU):
xpdu = ComplexAckPDU()
apdu.encode(xpdu)
elif isinstance(apdu, ErrorPDU):
xpdu = ErrorPDU()
apdu.encode(xpdu)
elif isinstance(apdu, RejectPDU):
xpdu = apdu
elif isinstance(apdu, AbortPDU):
xpdu = apdu
else:
if _debug: ApplicationServiceAccessPoint._debug(" - unknown PDU type")
return
if _debug: ApplicationServiceAccessPoint._debug(" - xpdu %r", xpdu)
# forward the encoded packet
self.response(xpdu)
bacpypes_debugging(ApplicationServiceAccessPoint)
|
py | b411c19ee6d2028f3d0e930c2d789a23b69b7aef | import os
import numpy as np
import pandas as pd
import scanpy as sc
import torch
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torch.utils.data.dataset import TensorDataset
from src.Model import VAE_EAD
from src.utils import evaluate, extractEdgesFromMatrix
Tensor = torch.cuda.FloatTensor
class non_celltype_GRN_model:
def __init__(self, opt):
self.opt = opt
try:
os.mkdir(opt.save_name)
except:
print('dir exists')
def initalize_A(self, data):
num_genes = data.shape[1]
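# start from a near-uniform adjacency matrix (each weight ~ 1/(num_genes - 1)) plus a little noise to break symmetry; the diagonal is zeroed below so genes do not regulate themselves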
A = np.ones([num_genes, num_genes]) / (num_genes - 1) + (np.random.rand(num_genes * num_genes) * 0.0002).reshape(
[num_genes, num_genes])
for i in range(len(A)):
A[i, i] = 0
return A
def init_data(self):
Ground_Truth = pd.read_csv(self.opt.net_file, header=0)
data = sc.read(self.opt.data_file)
gene_name = list(data.var_names)
data_values = data.X
Dropout_Mask = (data_values != 0).astype(float)
data_values = (data_values - data_values.mean(0)) / (data_values.std(0))
data = pd.DataFrame(data_values, index=list(data.obs_names), columns=gene_name)
TF = set(Ground_Truth['Gene1'])
All_gene = set(Ground_Truth['Gene1']) | set(Ground_Truth['Gene2'])
num_genes, num_nodes = data.shape[1], data.shape[0]
Evaluate_Mask = np.zeros([num_genes, num_genes])
TF_mask = np.zeros([num_genes, num_genes])
for i, item in enumerate(data.columns):
for j, item2 in enumerate(data.columns):
if i == j:
continue
if item2 in TF and item in All_gene:
Evaluate_Mask[i, j] = 1
if item2 in TF:
TF_mask[i, j] = 1
feat_train = torch.FloatTensor(data.values)
train_data = TensorDataset(feat_train, torch.LongTensor(list(range(len(feat_train)))),
torch.FloatTensor(Dropout_Mask))
dataloader = DataLoader(train_data, batch_size=self.opt.batch_size, shuffle=True, num_workers=1)
truth_df = pd.DataFrame(np.zeros([num_genes, num_genes]), index=data.columns, columns=data.columns)
for i in range(Ground_Truth.shape[0]):
truth_df.loc[Ground_Truth.iloc[i, 1], Ground_Truth.iloc[i, 0]] = 1
A_truth = truth_df.values
idx_rec, idx_send = np.where(A_truth)
truth_edges = set(zip(idx_send, idx_rec))
return dataloader, Evaluate_Mask, num_nodes, num_genes, data, truth_edges, TF_mask, gene_name
def train_model(self):
opt = self.opt
dataloader, Evaluate_Mask, num_nodes, num_genes, data, truth_edges, TFmask2, gene_name = self.init_data()
adj_A_init = self.initalize_A(data)
vae = VAE_EAD(adj_A_init, 1, opt.n_hidden, opt.K).float().cuda()
optimizer = optim.RMSprop(vae.parameters(), lr=opt.lr)
optimizer2 = optim.RMSprop([vae.adj_A], lr=opt.lr * 0.2)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=opt.lr_step_size, gamma=opt.gamma)
best_Epr = 0
vae.train()
for epoch in range(opt.n_epochs + 1):
loss_all, mse_rec, loss_kl, data_ids, loss_tfs, loss_sparse = [], [], [], [], [], []
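# alternate optimisation: for the first K1 epochs of each (K1 + K2) cycle the adjacency matrix A is frozen and only the VAE weights are trained; for the remaining K2 epochs only A is updated (via optimizer2)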
if epoch % (opt.K1 + opt.K2) < opt.K1:
vae.adj_A.requires_grad = False
else:
vae.adj_A.requires_grad = True
for i, data_batch in enumerate(dataloader, 0):
optimizer.zero_grad()
inputs, data_id, dropout_mask = data_batch
inputs = Variable(inputs.type(Tensor))
data_ids.append(data_id.cpu().detach().numpy())
temperature = max(0.95 ** epoch, 0.5)
loss, loss_rec, loss_gauss, loss_cat, dec, y, hidden = vae(inputs, dropout_mask=None,
temperature=temperature, opt=opt)
sparse_loss = opt.alpha * torch.mean(torch.abs(vae.adj_A))
loss = loss + sparse_loss
loss.backward()
mse_rec.append(loss_rec.item())
loss_all.append(loss.item())
loss_kl.append(loss_gauss.item() + loss_cat.item())
loss_sparse.append(sparse_loss.item())
if epoch % (opt.K1 + opt.K2) < opt.K1:
optimizer.step()
else:
optimizer2.step()
scheduler.step()
if epoch % (opt.K1 + opt.K2) >= opt.K1:
Ep, Epr = evaluate(vae.adj_A.cpu().detach().numpy(), truth_edges, Evaluate_Mask)
best_Epr = max(Epr, best_Epr)
print('epoch:', epoch, 'Ep:', Ep, 'Epr:', Epr, 'loss:',
np.mean(loss_all), 'mse_loss:', np.mean(mse_rec), 'kl_loss:', np.mean(loss_kl), 'sparse_loss:',
np.mean(loss_sparse))
extractEdgesFromMatrix(vae.adj_A.cpu().detach().numpy(), gene_name, TFmask2).to_csv(
opt.save_name + '/GRN_inference_result.tsv', sep='\t', index=False)
|
py | b411c252179186c03368f959f1889056501c98d4 | #!/usr/bin/env python
# -*- coding: ASCII -*-
"""
:Author: Martin Kircher
:Contact: [email protected]
:Date: *18.07.2016
"""
import sys, os
from optparse import OptionParser
from collections import defaultdict
import gzip
def selColumnsToVector(selColumns,lenColumns):
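# build a tab-separated 0/1 indicator vector of length lenColumns with a 1 at every selected column index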
selColumns.sort()
last = 0
res = ""
for elem in selColumns:
res += (elem-last)*"0\t"+"1\t"
last=elem+1
res += (lenColumns-last)*"0\t"
return res.rstrip()
def sharedPrefix(str1,str2):
counter = 0
for i in range(min(len(str1),len(str2))):
if str1[i] == str2[i]: counter+=1
else: break
return counter
def sharedPostfix(str1,str2):
counter = 0
for i in range(1,min(len(str1),len(str2))+1):
if str1[-i] == str2[-i]: counter+=1
else: break
return counter
parser = OptionParser("%prog [options] filename")
parser.add_option("-a","--assignment", dest="assignment", help="Assignment of variants and tags (def '../assignment/LDLR.variants.txt.gz')",default="../assignment/LDLR.variants.txt.gz")
#parser.add_option("-i","--InDels", dest="InDels", help="Keep barcodes with InDels (def Off)",default=False,action="store_true")
#parser.add_option("-e","--IgnoreInDels", dest="IgnoreInDels", help="Ignore InDel annotations for barcodes (def Off)",default=False,action="store_true")
parser.add_option("-v","--verbose", dest="verbose", help="Turn on verbose output (def Off)",default=False,action="store_true")
(options, args) = parser.parse_args()
if not os.path.exists(options.assignment):
sys.stderr.write("Error: Assignment file does not exist\n")
sys.exit()
if len(args) != 1:
sys.stderr.write("Error: Only one count file allowed as input.\n")
sys.exit()
else:
filename = args[0]
if not os.path.exists(filename):
sys.stderr.write("Error: Input file does not exist.\n")
sys.exit()
variants = defaultdict(set)
allVariants = set()
poolCounts = {}
isWT = set()
lPrefix = None
infile = gzip.open(options.assignment) if options.assignment.endswith(".gz") else open(options.assignment)
for line in infile:
if line.startswith('#'): continue
fields = line.rstrip().split()
wrongIndel = False
newfields = [fields[0]]
for elem in fields[1:]:
#"IRF6:616:ATTT>ATT"
var=elem.split(":")
if (len(var[2]) == 3):
if int(var[1]) > 20:
newfields.append(elem)
else:
wrongIndel = True
break
else:
ref,alt = var[2].split(">")
pos = int(var[1])
trim = sharedPostfix(ref,alt)
if trim > 0:
ref = ref[:-trim]
alt = alt[:-trim]
offset = sharedPrefix(ref,alt)
if offset > 0:
pos+=offset
ref=ref[offset:]
alt=alt[offset:]
if len(ref) == 0: ref = "."
if len(alt) == 0: alt = "."
if len(ref) == 1 and (ref != ".") and alt == "." and pos > 20:
newfields.append("%s:%d:%s>%s"%(var[0],pos,ref,alt))
else:
wrongIndel = True
break
if wrongIndel: continue
fields=newfields
if len(fields) == 1:
isWT.add(fields[0])
#fields.append("WT:0:X>X")
for elem in fields[1:]:
#if options.IgnoreInDels:
#var=elem.split(":")[-1]
#if (len(var) != 3) or ("." in var): continue
fkey = elem.split(":")
if lPrefix == None: lPrefix = len(fkey[0])
elif len(fkey[0]) < lPrefix: lPrefix = len(fkey[0])
allVariants.add((int(fkey[1]),elem))
variants[fields[0]].add(elem)
tags = fields[0].split(',')
if len(tags) > 1:
for tag in tags:
poolCounts[tag]=fields[0]
infile.close()
if options.verbose: sys.stderr.write("Read %d variant to barcode assignments\n"%(len(variants)))
poolCountVal1 = defaultdict(int)
poolCountVal2 = defaultdict(int)
infile = gzip.open(filename) if filename.endswith(".gz") else open(filename)
for line in infile:
if line.startswith('#'): continue
fields = line.rstrip().split()
if len(fields) > 2:
if fields[0] in poolCounts:
val1 = int(fields[1])
val2 = int(fields[2])
poolCountVal1[poolCounts[fields[0]]]+=val1
poolCountVal2[poolCounts[fields[0]]]+=val2
infile.close()
if options.verbose: sys.stderr.write("Read %d variants\n"%(len(allVariants)))
lPrefix+=1
columnAssignment = {}
header = "#Barcode\tDNA\tRNA\t"
for ind,(pos,variant) in enumerate(sorted(allVariants)):
columnAssignment[variant]=ind
header += variant[lPrefix:].replace(":","_").replace(">",".").replace("..",".d")+"\t"
header = header.rstrip()
sys.stdout.write(header+"\n")
totalVarColumns = len(allVariants)
del allVariants
infile = gzip.open(filename) if filename.endswith(".gz") else open(filename)
for line in infile:
if line.startswith('#'): continue
fields = line.rstrip().split()
if len(fields) > 2 and ((fields[0] in variants) or (fields[0] in isWT)):
if fields[0] in poolCounts:
pname = poolCounts[fields[0]]
selColumns = []
for varName in variants[pname]:
if varName in columnAssignment: selColumns.append(columnAssignment[varName])
sys.stdout.write("%s\t%s\t%s\t%s\n"%(pname,poolCountVal1[pname],poolCountVal1[pname],selColumnsToVector(selColumns,totalVarColumns)))
else:
selColumns = []
for varName in variants[fields[0]]:
if varName in columnAssignment: selColumns.append(columnAssignment[varName])
sys.stdout.write("%s\t%s\t%s\t%s\n"%(fields[0],fields[1],fields[2],selColumnsToVector(selColumns,totalVarColumns)))
infile.close()
|
py | b411c2fa017e12333d0d1480fc72575539334890 | import pickle
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal, assert_,
assert_allclose)
from scipy.stats import norm, uniform
from refnx.analysis import (Interval, PDF, Parameter, Parameters,
is_parameters)
from refnx.analysis.parameter import (constraint_tree,
build_constraint_from_tree)
class TestParameter(object):
def setup_method(self):
pass
def test_parameter(self):
# simple test of constraint
x = Parameter(5.)
y = Parameter(1.)
y.constraint = x
assert(x in y.dependencies())
y.constraint = x * 2.
assert_equal(y.value, x.value * 2.)
# parameter should be in y's dependencies
assert(x in y._deps)
assert(x in y.dependencies())
# if you've constrained a parameter it shouldn't be varying
assert_(y.vary is False)
# you can't set a constraint on a parameter with an expression that
# already involves the parameter
from pytest import raises
with raises(ValueError):
x.constraint = y
# try a negative value
x.value = -1.
assert_equal(y.value, -2.)
# nested constraints
z = Parameter(1.)
z.constraint = x + y
assert_equal(z.value, -3)
# check that nested circular constraints aren't allowed
with raises(ValueError):
x.constraint = z
# z = x + y --> z = x + 2*x
# therefore y shouldn't be in z's dependencies, but x should be.
assert(x in z.dependencies())
assert(y not in z.dependencies())
# absolute value constraint
y.constraint = abs(x)
assert_equal(y.value, 1)
# sin constraint
y.constraint = np.sin(x) + 2.
assert_equal(y.value, 2. + np.sin(x.value))
def test_repr(self):
p = Parameter(value=5, name='pop', vary=True)
q = eval(repr(p))
assert(q.name == 'pop')
assert_allclose(q.value, p.value)
p.bounds.lb = -5
q = eval(repr(p))
assert_allclose(q.bounds.lb, -5)
assert_allclose(q.bounds.ub, np.inf)
p = Parameter(value=5, vary=True)
q = eval(repr(p))
assert_allclose(q.value, p.value)
assert_allclose(q.vary, p.vary)
def test_func_attribute(self):
# a Parameter object should have math function attributes
a = Parameter(1)
assert_(hasattr(a, 'sin'))
def test_remove_constraint(self):
x = Parameter(5.)
y = Parameter(1.)
y.constraint = x * 2.
y.constraint = None
assert_(y.vary is False)
assert_equal(y.value, 10)
assert_(y._constraint is None)
def test_parameter_bounds(self):
x = Parameter(4, bounds=Interval(-4, 4))
assert_equal(x.logp(), uniform.logpdf(0, -4, 8))
x.bounds = None
assert_(isinstance(x._bounds, Interval))
assert_equal(x.bounds.lb, -np.inf)
assert_equal(x.bounds.ub, np.inf)
assert_equal(x.logp(), 0)
x.setp(bounds=norm(0, 1))
assert_almost_equal(x.logp(1), norm.logpdf(1, 0, 1))
# all created parameters were mistakenly being given the same
# default bounds instance!
x = Parameter(4)
y = Parameter(5)
assert_(id(x.bounds) != id(y.bounds))
def test_range(self):
x = Parameter(0.)
x.range(-1, 1.)
assert_equal(x.bounds.lb, -1)
assert_equal(x.bounds.ub, 1.)
vals = x.valid(np.linspace(-100, 100, 10000))
assert_(np.min(vals) >= -1)
assert_(np.max(vals) <= 1)
def test_parameter_attrib(self):
# each parameter should have bound math methods
a = Parameter(1.)
assert_(hasattr(a, 'sin'))
def test_pickle(self):
# a parameter and a constrained parameter should be pickleable
bounds = PDF(norm(1., 2.))
x = Parameter(1, bounds=bounds)
pkl = pickle.dumps(x)
unpkl = pickle.loads(pkl)
# test pickling on a constrained parameter system
a = Parameter(1.)
b = Parameter(2.)
b.constraint = np.sin(a)
assert_(hasattr(a, 'sin'))
c = [a, b]
pkl = pickle.dumps(c)
unpkl = pickle.loads(pkl)
d, e = unpkl
d.value = 2.
assert_equal(e.value, np.sin(2.))
# should still have all math functions
assert_(hasattr(d, 'sin'))
def test_or(self):
# concatenation of Parameter instances
a = Parameter(1, name='a')
b = Parameter(2, name='b')
c = Parameters(name='c')
c.append(a)
c.append(b)
# concatenate Parameter instances
d = a | b
assert_(is_parameters(d))
# concatenate Parameter with Parameters
d = a | c
assert_(is_parameters(d))
assert_equal(len(d), 2)
# a, a, b
assert_equal(len(d.flattened()), 3)
def test_constraint_analyser(self):
a = Parameter(1)
b = Parameter(2, constraint=a)
c = Parameter(2.)
d = Parameter(3, constraint=np.cos(b + np.sin(a) + 2 * (a + b + c)))
val = d.value
tree = constraint_tree(d.constraint)
new_constraint = build_constraint_from_tree(tree)
assert_allclose(new_constraint.value, val)
a.value = 10
assert_allclose(new_constraint.value, d.value)
# inject constraint into parameter
e = Parameter(1)
e.constraint = new_constraint
a.value = 11
assert_allclose(e.value, d.value)
# check that it's possible to build a constraint tree from a single
# param
tree = constraint_tree(b.constraint)
new_constraint = build_constraint_from_tree(tree)
e = Parameter(1)
e.constraint = new_constraint
a.value = 0.1234
assert_allclose(e.value, a.value)
# check that it's possible to build a constraint tree from a single
# param
e = Parameter(1)
e.constraint = 2
assert_allclose(e.value, 2)
class TestParameters(object):
def setup_method(self):
self.a = Parameter(1, name='a')
self.b = Parameter(2, name='b')
self.m = Parameters()
self.m.append(self.a)
self.m.append(self.b)
def test_retrieve_by_name(self):
p = self.m['a']
assert_(p is self.a)
# or by index
p = self.m[0]
assert_(p is self.a)
def test_repr(self):
p = Parameter(value=5, vary=False, name='test')
g = Parameters(name='name')
f = Parameters()
f.append(p)
f.append(g)
q = eval(repr(f))
assert(q.name is None)
assert_equal(q[0].value, 5)
assert(q[0].vary is False)
assert(isinstance(q[1], Parameters))
def test_set_by_name(self):
c = Parameter(3.)
self.m['a'] = c
assert_(self.m[0] is c)
# can't set an entry by name, if there isn't an existing name in this
# Parameters instance.
from pytest import raises
with raises(ValueError):
self.m['abc'] = c
def test_parameters(self):
# we've added two parameters
self.a.vary = True
self.b.vary = True
assert_equal(len(self.m.flattened()), 2)
# the two entries should just be the objects
assert_(self.m.varying_parameters()[0] is self.a)
assert_(self.m.varying_parameters()[1] is self.b)
def test_varying_parameters(self):
# even though we've added a twice we should still only see 2
# varying parameters
self.a.vary = True
self.b.vary = True
p = self.a | self.b | self.a
assert_equal(len(p.varying_parameters()), 2)
def test_pickle_parameters(self):
# need to check that Parameters can be pickled/unpickled
pkl = pickle.dumps(self.m)
pickle.loads(pkl)
def test_or(self):
# concatenation of Parameters
# Parameters with Parameter
c = self.m | self.b
assert_equal(len(c), 3)
assert_equal(len(c.flattened()), 3)
assert_(c.flattened()[1] is self.b)
assert_(c.flattened()[2] is self.b)
# Parameters with Parameters
c = Parameters(name='c')
d = c | self.m
assert_(d.name == 'c')
def test_ior(self):
# concatenation of Parameters
# Parameters with Parameter
c = Parameters(name='c')
c |= self.b
assert_equal(len(c), 1)
assert_equal(len(c.flattened()), 1)
assert_(c.flattened()[0] is self.b)
# Parameters with Parameters
c = Parameters(name='c')
c |= self.m
assert_(c.name == 'c')
assert_equal(len(c), 1)
assert_equal(len(c.flattened()), 2)
assert_(c.flattened()[1] is self.b)
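# A minimal sketch of the constraint behaviour exercised by the tests above
# (derived from what those tests assert, not from additional refnx
# documentation); the helper is deliberately not named test_* so pytest does
# not collect it.
def _constraint_usage_sketch():
    x = Parameter(5., name='x')
    y = Parameter(1., name='y')
    y.constraint = x * 2.   # y.value becomes 10. and y.vary is forced to False
    x.value = 3.            # y.value follows the constraint and becomes 6.
    return x, y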
|
py | b411c4946f3c5ff55400b8f028f704db19279c19 | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mobilebert_edgetpu_trainer.py."""
import tensorflow as tf
from official.projects.edgetpu.nlp import mobilebert_edgetpu_trainer
from official.projects.edgetpu.nlp.configs import params
from official.projects.edgetpu.nlp.modeling import model_builder
# Helper function to create dummy dataset
def _dummy_dataset():
def dummy_data(_):
dummy_ids = tf.zeros((1, 64), dtype=tf.int32)
dummy_lm = tf.zeros((1, 64), dtype=tf.int32)
return dict(
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids,
masked_lm_positions=dummy_lm,
masked_lm_ids=dummy_lm,
masked_lm_weights=tf.cast(dummy_lm, dtype=tf.float32),
next_sentence_labels=tf.zeros((1, 1), dtype=tf.int32))
dataset = tf.data.Dataset.range(1)
dataset = dataset.repeat()
dataset = dataset.map(
dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
class EdgetpuBertTrainerTest(tf.test.TestCase):
def setUp(self):
super(EdgetpuBertTrainerTest, self).setUp()
self.experiment_params = params.EdgeTPUBERTCustomParams()
self.strategy = tf.distribute.get_strategy()
self.experiment_params.train_datasest.input_path = 'dummy'
self.experiment_params.eval_dataset.input_path = 'dummy'
def test_train_model_locally(self):
"""Tests training a model locally with one step."""
teacher_model = model_builder.build_bert_pretrainer(
pretrainer_cfg=self.experiment_params.teacher_model,
name='teacher')
_ = teacher_model(teacher_model.inputs)
student_model = model_builder.build_bert_pretrainer(
pretrainer_cfg=self.experiment_params.student_model,
name='student')
_ = student_model(student_model.inputs)
trainer = mobilebert_edgetpu_trainer.MobileBERTEdgeTPUDistillationTrainer(
teacher_model=teacher_model,
student_model=student_model,
strategy=self.strategy,
experiment_params=self.experiment_params)
# Rebuild dummy dataset since loading real dataset will cause timeout error.
trainer.train_dataset = _dummy_dataset()
trainer.eval_dataset = _dummy_dataset()
train_dataset_iter = iter(trainer.train_dataset)
eval_dataset_iter = iter(trainer.eval_dataset)
trainer.train_loop_begin()
trainer.train_step(train_dataset_iter)
trainer.eval_step(eval_dataset_iter)
if __name__ == '__main__':
tf.test.main()
|
py | b411c74978e7fb6e35093b30b85d576e4d693c24 | from datetime import date
from django.test import RequestFactory, TestCase
from wagtail.core.models import Site
from jobmanager.blocks import JobListingList, JobListingTable
from jobmanager.models.django import JobCategory
from jobmanager.models.pages import JobListingPage
class JobListingBlockTestUtils:
def setUp(self):
self.root_page = Site.objects.get(is_default_site=True).root_page
self.division = JobCategory.objects.create(job_category='Test')
self.request = RequestFactory().get('/')
Site.find_for_request(self.request)
def make_job(self, title, live=True, close_date=None):
page = JobListingPage(
live=live,
title=title,
description='Test description',
open_date=date(2000, 1, 1),
close_date=close_date or date(2099, 12, 1),
salary_min=1,
salary_max=100,
division=self.division
)
self.root_page.add_child(instance=page)
return page
class JobListingListTestCase(JobListingBlockTestUtils, TestCase):
def render_block(self):
block = JobListingList()
value = block.to_python({
'more_jobs_page': self.root_page.pk,
})
return block.render(value, context={'request': self.request})
def test_no_jobs_message(self):
self.assertIn('There are no current openings', self.render_block())
def test_shows_jobs(self):
self.make_job('live1')
# This job should not show up because it is closed.
self.make_job('live_closed', close_date=date(1970, 1, 1))
self.make_job('live2')
# This job should not show up because it is not live.
self.make_job('draft', live=False)
self.make_job('live3')
self.make_job('live4')
self.make_job('live5')
# These jobs should not appear because the list should be limited to 5.
self.make_job('live6')
self.make_job('live7')
self.make_job('live8')
html = self.render_block()
self.assertIn('live1', html)
self.assertNotIn('live_closed', html)
self.assertIn('live2', html)
self.assertNotIn('draft', html)
self.assertIn('live3', html)
self.assertIn('live4', html)
self.assertIn('live5', html)
self.assertNotIn('live6', html)
self.assertNotIn('live7', html)
self.assertNotIn('live8', html)
def test_database_queries(self):
for i in range(5):
self.make_job(f'live{i}')
# We expect three database queries here. First, Wagtail has to look up
# the site root paths. These get cached on the request object. Then,
# all of the JobListingPages are retrieved in a single query. Finally,
# another query retrieves the URL for the "more jobs page" link.
with self.assertNumQueries(3):
self.render_block()
class JobListingTableTestCase(JobListingBlockTestUtils, TestCase):
def render_block(self):
return JobListingTable().render({}, context={'request': self.request})
def test_no_jobs_message(self):
self.assertIn('There are no current openings', self.render_block())
def test_shows_jobs(self):
self.make_job('live1')
# This job should not show up because it is closed.
self.make_job('live_closed', close_date=date(1970, 1, 1))
self.make_job('live2')
# This job should not show up because it is not live.
self.make_job('draft', live=False)
self.make_job('live3')
self.make_job('live4')
self.make_job('live5')
self.make_job('live6')
html = self.render_block()
self.assertIn('live1', html)
self.assertNotIn('live_closed', html)
self.assertIn('live2', html)
self.assertNotIn('draft', html)
self.assertIn('live3', html)
self.assertIn('live4', html)
self.assertIn('live5', html)
self.assertIn('live6', html)
def test_database_queries(self):
for i in range(5):
self.make_job(f'live{i}')
# We expect 13 database queries:
#
# 1. Wagtail has to look up the site root paths, which get cached on
# the request object.
# 2. One query to retrieve all of the job listing pages.
# 3. One query to prefetch all of the job grades.
# 5x2. Two additional queries per job to retrieve offices and
# regions.
#
# This could be greatly optimized (reducing to only 2 additional
# location queries total) if django-modelcluster adds prefetch_related
# support to ParentalManyToManyField:
#
# https://github.com/wagtail/django-modelcluster/issues/101
with self.assertNumQueries(13):
self.render_block()
|
py | b411c764e3903d814a31cf784dd92b5016e8b0ec | from nose.tools import eq_
from mhcnames import parse_allele_name, AlleleName
def test_dog_class2_allele():
eq_(parse_allele_name("DLA-DQA1*00101"),
AlleleName("DLA", "DQA1", "01", "01"))
|
py | b411c963fae87f1fb2d8e5a0d6d0368b9e657b6a | #it is the basic program for dictionary in python
#dictionary works on the key - value concept
dict1 = dict()
dict1 = {
1 :"JavaScript" ,
2 :"TypeScript",
3 : "C++"
}
print(" 1] At first dictionry looks like this ->> " ,dict1)
dict1[1] = 'Python'
print(" 2] After modifying the value of key 1 dictionary looks like this ->> ",dict1) |
py | b411ca1712491fd29b0097e49ef4d761544ce5f4 | # Copyright (c) 2021 The Regents of the University of Michigan
# Part of fix-license-header, released under the BSD 3-Clause License.
"""Add license header to files.
This script operates on text files and replaces the initial comment block with a
license header. It is designed to be used with pre-commit.
The header is constructed from the first N lines from ``license-file`` (when
provided) and the given additional header lines. The comment prefix is added to
the start of each header line.
For each file given in the arguments, it checks if the first commented lines in
the file match the header. When they fail to match, the script rewrites the
initial comments in the file with the given header.
"""
import argparse
import sys
def fix_file(f, header_lines, prefix, keep_before, keep_after):
"""Fix one file.
Return 0 if the file is not modified, 1 if it is.
"""
line = f.readline()
if line.endswith(b'\r\n'):
line_ending = b'\r\n'
else:
line_ending = b'\n'
before = b''
after = b''
file_header = []
while (line.startswith(prefix)
or any([line.startswith(s) for s in keep_before])):
if any([line.startswith(s) for s in keep_before]):
before += line
elif any([line.startswith(s) for s in keep_after]):
after += line
else:
file_header.append(line[len(prefix):].strip())
line = f.readline()
# read the contents of the file
file_contents = line + f.read()
# check if the header is correct
if file_header == header_lines and file_contents.startswith(line_ending):
return 0
else:
# header doesn't match, rewrite file
f.seek(0)
f.truncate()
f.write(before)
for line in header_lines:
f.write(prefix + line + line_ending)
if len(after) > 0:
f.write(line_ending)
f.write(after)
if len(file_contents) > 0 and not file_contents.startswith(line_ending):
f.write(line_ending)
f.write(file_contents)
return 1
def main(argv=None):
"""The main entrypoint."""
parser = argparse.ArgumentParser('Fixes the license headers in files.',)
parser.add_argument('--license-file', help='License file to read', type=str)
parser.add_argument('--start',
help='Number of lines to ignore (default: 0)',
type=int,
default=0)
parser.add_argument('--num',
help='Number of lines to read (default: 1)',
type=int,
default=1)
parser.add_argument('--add',
action='append',
help='Line to add after the license file '
'[can specify multiple times]',
type=str)
parser.add_argument('--keep-before',
action='append',
help='Keep lines starting with this before the header '
'[can specify multiple times]',
type=str)
parser.add_argument('--keep-after',
action='append',
help='Keep lines that start with this after the header '
'[can specify multiple times]',
type=str)
parser.add_argument('--comment-prefix',
help='Comment prefix',
type=str,
default='#')
parser.add_argument('filenames', nargs='*', help='Filenames to fix')
args = parser.parse_args(argv)
# build the header
header_lines = []
if args.license_file is not None:
with open(args.license_file, 'rb') as license:
for i in range(args.start):
license.readline()
for i in range(args.num):
header_lines.append(license.readline().strip())
if args.add is not None:
for line in args.add:
header_lines.append(line.encode('utf-8'))
keep_before = []
if args.keep_before is not None:
keep_before = [s.encode('utf-8') for s in args.keep_before]
keep_after = []
if args.keep_after is not None:
keep_after = [s.encode('utf-8') for s in args.keep_after]
return_value = 0
for filename in args.filenames:
with open(filename, 'r+b') as f:
status = fix_file(f=f,
header_lines=header_lines,
prefix=args.comment_prefix.encode('utf-8') + b' ',
keep_before=keep_before,
keep_after=keep_after)
return_value |= status
if status:
print(f'Updated license header in {filename}')
sys.exit(return_value)
if __name__ == '__main__':
exit(main(sys.argv[1:]))
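# Example invocation (an illustrative sketch; the script name, file names and
# license text are hypothetical, not taken from any particular repository):
#
#   python fix_license_header.py \
#       --license-file LICENSE --num 2 \
#       --add 'Part of example-project, released under the BSD 3-Clause License.' \
#       --comment-prefix '#' \
#       module_a.py module_b.py
#
# Each listed file is rewritten, when needed, so that it starts with the first
# two lines of LICENSE plus the extra --add line, each prefixed with "# ", and
# the script exits non-zero whenever it had to update at least one file (which
# is how pre-commit detects the change).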
|
py | b411ca1c482a451fa8df4cae74c3c3ae9e86a040 | import numpy as np
import pytest
from pandas.errors import PerformanceWarning
from pandas.core.dtypes.common import is_object_dtype
import pandas as pd
from pandas import SparseDtype
import pandas._testing as tm
from pandas.arrays import SparseArray
from pandas.tests.extension import base
def make_data(fill_value):
if np.isnan(fill_value):
data = np.random.uniform(size=100)
else:
data = np.random.randint(1, 100, size=100)
if data[0] == data[1]:
data[0] += 1
data[2::3] = fill_value
return data
@pytest.fixture
def dtype():
return SparseDtype()
@pytest.fixture(params=[0, np.nan])
def data(request):
"""Length-100 PeriodArray for semantics test."""
res = SparseArray(make_data(request.param), fill_value=request.param)
return res
@pytest.fixture
def data_for_twos(request):
return SparseArray(np.ones(100) * 2)
@pytest.fixture(params=[0, np.nan])
def data_missing(request):
"""Length 2 array with [NA, Valid]"""
return SparseArray([np.nan, 1], fill_value=request.param)
@pytest.fixture(params=[0, np.nan])
def data_repeated(request):
"""Return different versions of data for count times"""
def gen(count):
for _ in range(count):
yield SparseArray(make_data(request.param), fill_value=request.param)
yield gen
@pytest.fixture(params=[0, np.nan])
def data_for_sorting(request):
return SparseArray([2, 3, 1], fill_value=request.param)
@pytest.fixture(params=[0, np.nan])
def data_missing_for_sorting(request):
return SparseArray([2, np.nan, 1], fill_value=request.param)
@pytest.fixture
def na_value():
return np.nan
@pytest.fixture
def na_cmp():
return lambda left, right: pd.isna(left) and pd.isna(right)
@pytest.fixture(params=[0, np.nan])
def data_for_grouping(request):
return SparseArray([1, 1, np.nan, np.nan, 2, 2, 1, 3], fill_value=request.param)
class BaseSparseTests:
def _check_unsupported(self, data):
if data.dtype == SparseDtype(int, 0):
pytest.skip("Can't store nan in int array.")
@pytest.mark.xfail(reason="SparseArray does not support setitem")
def test_ravel(self, data):
super().test_ravel(data)
class TestDtype(BaseSparseTests, base.BaseDtypeTests):
def test_array_type_with_arg(self, data, dtype):
assert dtype.construct_array_type() is SparseArray
class TestInterface(BaseSparseTests, base.BaseInterfaceTests):
def test_no_values_attribute(self, data):
pytest.skip("We have values")
def test_copy(self, data):
# __setitem__ does not work, so we only have a smoke-test
data.copy()
def test_view(self, data):
# __setitem__ does not work, so we only have a smoke-test
data.view()
class TestConstructors(BaseSparseTests, base.BaseConstructorsTests):
pass
class TestReshaping(BaseSparseTests, base.BaseReshapingTests):
def test_concat_mixed_dtypes(self, data):
# https://github.com/pandas-dev/pandas/issues/20762
# This should be the same, aside from concat([sparse, float])
df1 = pd.DataFrame({"A": data[:3]})
df2 = pd.DataFrame({"A": [1, 2, 3]})
df3 = pd.DataFrame({"A": ["a", "b", "c"]}).astype("category")
dfs = [df1, df2, df3]
# dataframes
result = pd.concat(dfs)
expected = pd.concat(
[x.apply(lambda s: np.asarray(s).astype(object)) for x in dfs]
)
self.assert_frame_equal(result, expected)
def test_concat_columns(self, data, na_value):
self._check_unsupported(data)
super().test_concat_columns(data, na_value)
def test_concat_extension_arrays_copy_false(self, data, na_value):
self._check_unsupported(data)
super().test_concat_extension_arrays_copy_false(data, na_value)
def test_align(self, data, na_value):
self._check_unsupported(data)
super().test_align(data, na_value)
def test_align_frame(self, data, na_value):
self._check_unsupported(data)
super().test_align_frame(data, na_value)
def test_align_series_frame(self, data, na_value):
self._check_unsupported(data)
super().test_align_series_frame(data, na_value)
def test_merge(self, data, na_value):
self._check_unsupported(data)
super().test_merge(data, na_value)
class TestGetitem(BaseSparseTests, base.BaseGetitemTests):
def test_get(self, data):
s = pd.Series(data, index=[2 * i for i in range(len(data))])
if np.isnan(s.values.fill_value):
assert np.isnan(s.get(4)) and np.isnan(s.iloc[2])
else:
assert s.get(4) == s.iloc[2]
assert s.get(2) == s.iloc[1]
def test_reindex(self, data, na_value):
self._check_unsupported(data)
super().test_reindex(data, na_value)
# Skipping TestSetitem, since we don't implement it.
class TestMissing(BaseSparseTests, base.BaseMissingTests):
def test_isna(self, data_missing):
expected_dtype = SparseDtype(bool, pd.isna(data_missing.dtype.fill_value))
expected = SparseArray([True, False], dtype=expected_dtype)
result = pd.isna(data_missing)
self.assert_equal(result, expected)
result = pd.Series(data_missing).isna()
expected = pd.Series(expected)
self.assert_series_equal(result, expected)
# GH 21189
result = pd.Series(data_missing).drop([0, 1]).isna()
expected = pd.Series([], dtype=expected_dtype)
self.assert_series_equal(result, expected)
def test_fillna_limit_pad(self, data_missing):
with tm.assert_produces_warning(PerformanceWarning):
super().test_fillna_limit_pad(data_missing)
def test_fillna_limit_backfill(self, data_missing):
with tm.assert_produces_warning(PerformanceWarning):
super().test_fillna_limit_backfill(data_missing)
def test_fillna_series_method(self, data_missing):
with tm.assert_produces_warning(PerformanceWarning):
super().test_fillna_limit_backfill(data_missing)
@pytest.mark.skip(reason="Unsupported")
def test_fillna_series(self):
# this one looks doable.
pass
def test_fillna_frame(self, data_missing):
# Have to override to specify that fill_value will change.
fill_value = data_missing[1]
result = pd.DataFrame({"A": data_missing, "B": [1, 2]}).fillna(fill_value)
if pd.isna(data_missing.fill_value):
dtype = SparseDtype(data_missing.dtype, fill_value)
else:
dtype = data_missing.dtype
expected = pd.DataFrame(
{
"A": data_missing._from_sequence([fill_value, fill_value], dtype=dtype),
"B": [1, 2],
}
)
self.assert_frame_equal(result, expected)
class TestMethods(BaseSparseTests, base.BaseMethodsTests):
def test_combine_le(self, data_repeated):
# Series[SparseArray].__le__ returns a Series[Sparse[bool]] rather than
# Series[bool]
orig_data1, orig_data2 = data_repeated(2)
s1 = pd.Series(orig_data1)
s2 = pd.Series(orig_data2)
result = s1.combine(s2, lambda x1, x2: x1 <= x2)
expected = pd.Series(
SparseArray(
[a <= b for (a, b) in zip(list(orig_data1), list(orig_data2))],
fill_value=False,
)
)
self.assert_series_equal(result, expected)
val = s1.iloc[0]
result = s1.combine(val, lambda x1, x2: x1 <= x2)
expected = pd.Series(
SparseArray([a <= val for a in list(orig_data1)], fill_value=False)
)
self.assert_series_equal(result, expected)
def test_fillna_copy_frame(self, data_missing):
arr = data_missing.take([1, 1])
df = pd.DataFrame({"A": arr})
filled_val = df.iloc[0, 0]
result = df.fillna(filled_val)
assert df.values.base is not result.values.base
assert df.A._values.to_dense() is arr.to_dense()
def test_fillna_copy_series(self, data_missing):
arr = data_missing.take([1, 1])
ser = pd.Series(arr)
filled_val = ser[0]
result = ser.fillna(filled_val)
assert ser._values is not result._values
assert ser._values.to_dense() is arr.to_dense()
@pytest.mark.skip(reason="Not Applicable")
def test_fillna_length_mismatch(self, data_missing):
pass
def test_where_series(self, data, na_value):
assert data[0] != data[1]
cls = type(data)
a, b = data[:2]
ser = pd.Series(cls._from_sequence([a, a, b, b], dtype=data.dtype))
cond = np.array([True, True, False, False])
result = ser.where(cond)
new_dtype = SparseDtype("float", 0.0)
expected = pd.Series(
cls._from_sequence([a, a, na_value, na_value], dtype=new_dtype)
)
self.assert_series_equal(result, expected)
other = cls._from_sequence([a, b, a, b], dtype=data.dtype)
cond = np.array([True, False, True, True])
result = ser.where(cond, other)
expected = pd.Series(cls._from_sequence([a, b, b, b], dtype=data.dtype))
self.assert_series_equal(result, expected)
def test_combine_first(self, data):
if data.dtype.subtype == "int":
# Right now this is upcasted to float, just like combine_first
# for Series[int]
pytest.skip("TODO(SparseArray.__setitem__ will preserve dtype.")
super().test_combine_first(data)
def test_searchsorted(self, data_for_sorting, as_series):
with tm.assert_produces_warning(PerformanceWarning):
super().test_searchsorted(data_for_sorting, as_series)
def test_shift_0_periods(self, data):
# GH#33856 shifting with periods=0 should return a copy, not same obj
result = data.shift(0)
data._sparse_values[0] = data._sparse_values[1]
assert result._sparse_values[0] != result._sparse_values[1]
@pytest.mark.parametrize(
"method", ["argmax", "argmin"],
)
def test_argmin_argmax_all_na(self, method, data, na_value):
# overriding because Sparse[int64, 0] cannot handle na_value
self._check_unsupported(data)
super().test_argmin_argmax_all_na(method, data, na_value)
@pytest.mark.parametrize("box", [pd.array, pd.Series, pd.DataFrame])
def test_equals(self, data, na_value, as_series, box):
self._check_unsupported(data)
super().test_equals(data, na_value, as_series, box)
class TestCasting(BaseSparseTests, base.BaseCastingTests):
def test_astype_object_series(self, all_data):
# Unlike the base class, we do not expect the resulting Block
# to be ObjectBlock
ser = pd.Series(all_data, name="A")
result = ser.astype(object)
assert is_object_dtype(result._data.blocks[0].dtype)
def test_astype_object_frame(self, all_data):
# Unlike the base class, we do not expect the resulting Block
# to be ObjectBlock
df = pd.DataFrame({"A": all_data})
result = df.astype(object)
assert is_object_dtype(result._data.blocks[0].dtype)
# FIXME: these currently fail; don't leave commented-out
# check that we can compare the dtypes
# comp = result.dtypes.equals(df.dtypes)
# assert not comp.any()
def test_astype_str(self, data):
result = pd.Series(data[:5]).astype(str)
expected_dtype = pd.SparseDtype(str, str(data.fill_value))
expected = pd.Series([str(x) for x in data[:5]], dtype=expected_dtype)
self.assert_series_equal(result, expected)
@pytest.mark.xfail(raises=TypeError, reason="no sparse StringDtype")
def test_astype_string(self, data):
super().test_astype_string(data)
class TestArithmeticOps(BaseSparseTests, base.BaseArithmeticOpsTests):
series_scalar_exc = None
frame_scalar_exc = None
divmod_exc = None
series_array_exc = None
def _skip_if_different_combine(self, data):
if data.fill_value == 0:
# arith ops call on dtype.fill_value so that the sparsity
# is maintained. Combine can't be called on a dtype in
# general, so we can't make the expected. This is tested elsewhere
raise pytest.skip("Incorrected expected from Series.combine")
def test_error(self, data, all_arithmetic_operators):
pass
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
self._skip_if_different_combine(data)
super().test_arith_series_with_scalar(data, all_arithmetic_operators)
def test_arith_series_with_array(self, data, all_arithmetic_operators):
self._skip_if_different_combine(data)
super().test_arith_series_with_array(data, all_arithmetic_operators)
class TestComparisonOps(BaseSparseTests, base.BaseComparisonOpsTests):
def _compare_other(self, s, data, op_name, other):
op = self.get_op_from_name(op_name)
# array
result = pd.Series(op(data, other))
# hard to test the fill value, since we don't know what expected
# is in general.
# Rely on tests in `tests/sparse` to validate that.
assert isinstance(result.dtype, SparseDtype)
assert result.dtype.subtype == np.dtype("bool")
with np.errstate(all="ignore"):
expected = pd.Series(
SparseArray(
op(np.asarray(data), np.asarray(other)),
fill_value=result.values.fill_value,
)
)
tm.assert_series_equal(result, expected)
# series
s = pd.Series(data)
result = op(s, other)
tm.assert_series_equal(result, expected)
class TestPrinting(BaseSparseTests, base.BasePrintingTests):
@pytest.mark.xfail(reason="Different repr", strict=True)
def test_array_repr(self, data, size):
super().test_array_repr(data, size)
class TestParsing(BaseSparseTests, base.BaseParsingTests):
@pytest.mark.parametrize("engine", ["c", "python"])
def test_EA_types(self, engine, data):
expected_msg = r".*must implement _from_sequence_of_strings.*"
with pytest.raises(NotImplementedError, match=expected_msg):
super().test_EA_types(engine, data)
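# Background note on the fixtures above (an explanatory comment, not test
# code): SparseArray only materialises values that differ from ``fill_value``,
# so the fixtures are parametrised over fill_value in {0, np.nan} to run every
# base extension test against both an integer-backed and a NaN-backed sparse
# array, and make_data() writes fill_value into every third element so the
# generated arrays are genuinely sparse.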
|
py | b411ca5abf63eca5a4d348a9dede555a5690d5f8 | import unittest
from StringIO import StringIO
from tests.performance.perf_compare import (
parse_arguments,
compare_create,
compare_get,
compare_update
)
import pandas as pd
CREATE_DF_1 = """
\tCores\tTaskNum\tSleep(s)\tUseInsConf\tStart(s)\tExec(s)
0\t1000\t5000\t10\tTrue\t3.97567\t24.73733
"""
CREATE_DF_2 = """
\tCores\tTaskNum\tSleep(s)\tUseInsConf\tStart(s)\tExec(s)
0\t1000\t5000\t10\tTrue\t4.31115\t20.98786
"""
GET_DF_1 = """
\tTaskNum\tSleep(s)\tUseInsConf\tCreates\tCreateFails\tGets\tGetFails
0\t5000\t10\t70\t3\t73\t0
"""
GET_DF_2 = """
\tTaskNum\tSleep(s)\tUseInsConf\tCreates\tCreateFails\tGets\tGetFails
0\t5000\t10\t70\t3\t60\t13
"""
UPDATE_DF_1 = """
\tNumStartTasks\tTaskIncrementEachTime\tNumOfIncrement\tSleep(s)\tUseInsConf\tTotalTimeInSeconds
5\t1\t1\t5000\t10\t850
"""
UPDATE_DF_2 = """
\tNumStartTasks\tTaskIncrementEachTime\tNumOfIncrement\tSleep(s)\tUseInsConf\tTotalTimeInSeconds
5\t1\t1\t5000\t10\t950
"""
class PerfCompareTest(unittest.TestCase):
def test_parser(self):
parser = parse_arguments(['-f1', 'PERF_1', '-f2', 'PERF_2'])
self.assertEqual(parser.file_1, 'PERF_1')
self.assertEqual(parser.file_2, 'PERF_2')
def test_compare_create(self):
df1 = pd.read_csv(StringIO(CREATE_DF_1), '\t', index_col=0)
df2 = pd.read_csv(StringIO(CREATE_DF_2), '\t', index_col=0)
df_out = compare_create(df1, df2)
self.assertEqual(df_out.iloc[0]['Perf Change'], '-0.1516')
def test_compare_get(self):
df1 = pd.read_csv(StringIO(GET_DF_1), '\t', index_col=0)
df2 = pd.read_csv(StringIO(GET_DF_2), '\t', index_col=0)
df_out = compare_get(df1, df2)
shared_fields = ['TaskNum', 'Sleep(s)', 'UseInsConf']
for field in shared_fields:
self.assertEqual(df_out.iloc[0][field],
df_out.iloc[1][field])
def test_compare_update(self):
df1 = pd.read_csv(StringIO(UPDATE_DF_1), '\t', index_col=0)
df2 = pd.read_csv(StringIO(UPDATE_DF_2), '\t', index_col=0)
df_out = compare_update(df1, df2)
self.assertEqual(df_out.iloc[0]['Time Diff'], '100')
|
py | b411ca828435583e2cf57e440ab87a267f772525 | """文字列基礎
バイトを文字列に変換する decodeメソッド (bytes -> str)
変換できない文字を無視する errors=ignore
[説明ページ]
https://tech.nkhn37.net/python-encode-decode/#encodedecode
"""
data = 'おはようございます。'
# Encode to the specified character encoding with encode()
encode_data = data.encode('shift_jis')
print(encode_data)
print('================================')
# Decode back to a string with decode()
decode_data = encode_data.decode('utf_8', errors='ignore')
print(decode_data)
print(type(decode_data))
|
py | b411cb8b0c5c35372262732539faece4fe072505 | # Copyright 2008-2011 Nokia Networks
# Copyright 2011-2016 Ryan Tomac, Ed Manlove and contributors
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import types
from selenium import webdriver
from selenium.webdriver.support.event_firing_webdriver import EventFiringWebDriver
from SeleniumLibrary.base import keyword, LibraryComponent
from SeleniumLibrary.locators import WindowManager
from SeleniumLibrary.utils import (is_truthy, is_noney, secs_to_timestr,
timestr_to_secs)
from .webdrivertools import WebDriverCreator
class BrowserManagementKeywords(LibraryComponent):
def __init__(self, ctx):
LibraryComponent.__init__(self, ctx)
self._window_manager = WindowManager(ctx)
@keyword
def close_all_browsers(self):
"""Closes all open browsers and resets the browser cache.
After this keyword, new indexes returned from `Open Browser` keyword
are reset to 1.
This keyword should be used in test or suite teardown to make sure
all browsers are closed.
"""
self.debug('Closing all browsers.')
self.drivers.close_all()
@keyword
def close_browser(self):
"""Closes the current browser."""
if self.drivers.current:
self.debug('Closing browser with session id {}.'
.format(self.driver.session_id))
self.drivers.close()
@keyword
def open_browser(self, url=None, browser='firefox', alias=None,
remote_url=False, desired_capabilities=None,
ff_profile_dir=None, options=None, service_log_path=None):
"""Opens a new browser instance to the optional ``url``.
The ``browser`` argument specifies which browser to use. The
supported browsers are listed in the table below. The browser names
are case-insensitive and some browsers have multiple supported names.
| = Browser = | = Name(s) = |
| Firefox | firefox, ff |
| Google Chrome | googlechrome, chrome, gc |
| Headless Firefox | headlessfirefox |
| Headless Chrome | headlesschrome |
| Internet Explorer | internetexplorer, ie |
| Edge | edge |
| Safari | safari |
| Opera | opera |
| Android | android |
| Iphone | iphone |
| PhantomJS | phantomjs |
| HTMLUnit | htmlunit |
| HTMLUnit with Javascript | htmlunitwithjs |
To be able to actually use one of these browsers, you need to have
a matching Selenium browser driver available. See the
[https://github.com/robotframework/SeleniumLibrary#browser-drivers|
project documentation] for more details. Headless Firefox and
Headless Chrome are new additions in SeleniumLibrary 3.1.0
and require Selenium 3.8.0 or newer.
After opening the browser, it is possible to use optional
``url`` to navigate the browser to the desired address.
Optional ``alias`` is an alias given for this browser instance and
it can be used for switching between browsers. When same ``alias``
is given with two `Open Browser` keywords, the first keyword will
open a new browser, but the second one will switch to the already
opened browser and will not open a new browser. The ``alias``
definition overrules the ``browser`` definition. When the same ``alias``
is used with a different ``browser``, the keyword switches to the browser
with that alias and does not open a new browser.
An alternative approach for switching is using an index returned
by this keyword. These indices start from 1, are incremented when new
browsers are opened, and reset back to 1 when `Close All Browsers`
is called. See `Switch Browser` for more information and examples.
Optional ``remote_url`` is the URL for a
[https://github.com/SeleniumHQ/selenium/wiki/Grid2|Selenium Grid].
Optional ``desired_capabilities`` can be used to configure, for example,
logging preferences for a browser or a browser and operating system
when using [http://saucelabs.com|Sauce Labs]. Desired capabilities can
be given either as a Python dictionary or as a string in the format
``key1:value1,key2:value2``.
[https://github.com/SeleniumHQ/selenium/wiki/DesiredCapabilities|
Selenium documentation] lists possible capabilities that can be
enabled.
Optional ``ff_profile_dir`` is the path to the Firefox profile
directory if you wish to overwrite the default profile Selenium
uses. Notice that prior to SeleniumLibrary 3.0, the library
contained its own profile that was used by default. The
``ff_profile_dir`` can also be an instance of the
[https://seleniumhq.github.io/selenium/docs/api/py/webdriver_firefox/selenium.webdriver.firefox.firefox_profile.html|selenium.webdriver.FirefoxProfile]
. As a third option, it is possible to use `FirefoxProfile` methods
and attributes to define the profile using methods and attributes
in the same way as with ``options`` argument. Example: It is possible
to use FirefoxProfile `set_preference` to define different
profile settings.
Optional ``options`` argument allows defining browser specific
Selenium options. Example for Chrome, the ``options`` argument
allows defining the following
[https://seleniumhq.github.io/selenium/docs/api/py/webdriver_chrome/selenium.webdriver.chrome.options.html#selenium.webdriver.chrome.options.Options|methods and attributes]
and for Firefox these
[https://seleniumhq.github.io/selenium/docs/api/py/webdriver_firefox/selenium.webdriver.firefox.options.html?highlight=firefox#selenium.webdriver.firefox.options.Options|methods and attributes]
are available. Please note that not all browsers supported by
SeleniumLibrary have Selenium options available; consult the Selenium
documentation to check which browsers support them. If the ``browser``
argument is `android` then
[https://seleniumhq.github.io/selenium/docs/api/py/webdriver_chrome/selenium.webdriver.chrome.options.html#selenium.webdriver.chrome.options.Options|Chrome options]
is used. Selenium options are also supported, when ``remote_url``
argument is used.
The SeleniumLibrary ``options`` argument accepts Selenium
options in two different formats: as a string and as Python object
which is an instance of the Selenium options class.
The string format allows defining Selenium options methods
or attributes and their arguments in Robot Framework test data.
The method and attribute names are case and space sensitive and
must match the Selenium options method and attribute names.
When defining a method, it must be defined in a similar way as in
python: method name, opening parenthesis, zero to many arguments
and closing parenthesis. If there is a need to define multiple
arguments for a single method, arguments must be separated with
comma, just like in Python. Example: `add_argument("--headless")`
or `add_experimental_option("key", "value")`. Attributes are
defined in a similar way as in Python: attribute name, equal sign,
and attribute value. Example, `headless=True`. Multiple methods
and attributes must be separated by a semicolon. Example:
`add_argument("--headless");add_argument("--start-maximized")`.
Arguments allow defining Python data types and arguments are
evaluated by using Python
[https://docs.python.org/3/library/ast.html#ast.literal_eval|ast.literal_eval].
Strings must be quoted with single or double quotes, example "value"
or 'value'. It is also possible to define other Python builtin
data types, example `True` or `None`, by not using quotes
around the arguments.
The string format is space friendly. Usually, spaces do not alter
the defining methods or attributes. There are two exceptions.
In some Robot Framework test data formats, two or more spaces are
considered as cell separator and instead of defining a single
argument, two or more arguments may be defined. Spaces in string
arguments are not removed and are left as is. Example
`add_argument ( "--headless" )` is same as
`add_argument("--headless")`. But `add_argument(" --headless ")` is
not same same as `add_argument ( "--headless" )`, because
spaces inside of quotes are not removed.
As the last format, the ``options`` argument also supports receiving
the Selenium options as a Python class instance. In this case, the
instance is used as-is and the SeleniumLibrary will not convert
the instance to other formats.
For example, if the following code return value is saved to
`${options}` variable in the Robot Framework data:
| options = webdriver.ChromeOptions()
| options.add_argument('--disable-dev-shm-usage')
| return options
Then the `${options}` variable can be used as an argument to
``options``.
For example, the ``options`` argument can be used to launch Chromium-based
applications which utilize the
[https://bitbucket.org/chromiumembedded/cef/wiki/UsingChromeDriver|Chromium Embedded Framework]
. To launch a Chromium-based application, use ``options`` to define
`binary_location` attribute and use `add_argument` method to define
`remote-debugging-port` port for the application. Once the browser
is opened, the test can interact with the embedded web-content of
the system under test.
Optional ``service_log_path`` argument defines the name of the
file where to write the browser driver logs. If the
``service_log_path`` argument contain a marker ``{index}``, it
will be automatically replaced with unique running
index, preventing files from being overwritten. Indices start from 1,
and how they are represented can be customized using Python's
[https://docs.python.org/3/library/string.html#format-string-syntax|
format string syntax].
Examples:
| `Open Browser` | http://example.com | Chrome | |
| `Open Browser` | http://example.com | Firefox | alias=Firefox |
| `Open Browser` | http://example.com | Edge | remote_url=http://127.0.0.1:4444/wd/hub |
| `Open Browser` | about:blank | | |
| `Open Browser` | browser=Chrome | | |
Alias examples:
| ${1_index} = | `Open Browser` | http://example.com | Chrome | alias=Chrome | # Opens new browser because alias is new. |
| ${2_index} = | `Open Browser` | http://example.com | Firefox | | # Opens new browser because alias is not defined. |
| ${3_index} = | `Open Browser` | http://example.com | Chrome | alias=Chrome | # Switches to the browser with Chrome alias. |
| ${4_index} = | `Open Browser` | http://example.com | Chrome | alias=${1_index} | # Switches to the browser with Chrome alias. |
| Should Be Equal | ${1_index} | ${3_index} | | | |
| Should Be Equal | ${1_index} | ${4_index} | | | |
| Should Be Equal | ${2_index} | ${2} | | | |
Example when using
[https://seleniumhq.github.io/selenium/docs/api/py/webdriver_chrome/selenium.webdriver.chrome.options.html#selenium.webdriver.chrome.options.Options|Chrome options]
method:
| `Open Browser` | http://example.com | Chrome | options=add_argument("--disable-popup-blocking"); add_argument("--ignore-certificate-errors") | # String format |
| ${options} = | Get Options | | | # Selenium options instance |
| `Open Browser` | http://example.com | Chrome | options=${options} | |
| `Open Browser` | None | Chrome | options=binary_location="/path/to/binary";add_argument("remote-debugging-port=port") | # Start a Chromium-based application |
Example for FirefoxProfile
| `Open Browser` | http://example.com | Firefox | ff_profile_dir=/path/to/profile | # Using profile from disk |
| `Open Browser` | http://example.com | Firefox | ff_profile_dir=${FirefoxProfile_instance} | # Using instance of FirefoxProfile |
| `Open Browser` | http://example.com | Firefox | ff_profile_dir=set_preference("key", "value");set_preference("other", "setting") | # Defining profile using FirefoxProfile methods |
If the provided configuration options are not enough, it is possible
to use `Create Webdriver` to customize browser initialization even
more.
Applying the ``desired_capabilities`` argument also to a local browser is
new in SeleniumLibrary 3.1.
Using ``alias`` to decide whether a new browser is opened is new
in SeleniumLibrary 4.0. The ``options`` and ``service_log_path``
are new in SeleniumLibrary 4.0. Support for ``ff_profile_dir``
accepting an instance of the `selenium.webdriver.FirefoxProfile`
and support defining FirefoxProfile with methods and
attributes are new in SeleniumLibrary 4.0.
Making ``url`` optional is new in SeleniumLibrary 4.1.
"""
index = self.drivers.get_index(alias)
if index:
self.info('Using existing browser from index %s.' % index)
self.switch_browser(alias)
if is_truthy(url):
self.go_to(url)
return index
return self._make_new_browser(url, browser, alias, remote_url,
desired_capabilities, ff_profile_dir,
options, service_log_path)
def _make_new_browser(self, url=None, browser='firefox', alias=None,
remote_url=False, desired_capabilities=None,
ff_profile_dir=None, options=None, service_log_path=None):
if is_truthy(remote_url):
self.info("Opening browser '%s' to base url '%s' through "
"remote server at '%s'." % (browser, url, remote_url))
else:
self.info("Opening browser '%s' to base url '%s'." % (browser, url))
driver = self._make_driver(browser, desired_capabilities,
ff_profile_dir, remote_url,
options, service_log_path)
driver = self._wrap_event_firing_webdriver(driver)
index = self.ctx.register_driver(driver, alias)
if is_truthy(url):
try:
driver.get(url)
except Exception:
self.debug("Opened browser with session id %s but failed "
"to open url '%s'." % (driver.session_id, url))
raise
self.debug('Opened browser with session id %s.' % driver.session_id)
return index
@keyword
def create_webdriver(self, driver_name, alias=None, kwargs={},
**init_kwargs):
"""Creates an instance of Selenium WebDriver.
Like `Open Browser`, but allows passing arguments to the created
WebDriver instance directly. This keyword should only be used if
the functionality provided by `Open Browser` is not adequate.
``driver_name`` must be a WebDriver implementation name like Firefox,
Chrome, Ie, Opera, Safari, PhantomJS, or Remote.
The initialized WebDriver can be configured either with a Python
dictionary ``kwargs`` or by using keyword arguments ``**init_kwargs``.
These arguments are passed directly to WebDriver without any
processing. See [https://seleniumhq.github.io/selenium/docs/api/py/api.html|
Selenium API documentation] for details about the supported arguments.
Examples:
| # Use proxy with Firefox | | | |
| ${proxy}= | `Evaluate` | selenium.webdriver.Proxy() | modules=selenium, selenium.webdriver |
| ${proxy.http_proxy}= | `Set Variable` | localhost:8888 | |
| `Create Webdriver` | Firefox | proxy=${proxy} | |
| # Use proxy with PhantomJS | | | |
| ${service args}= | `Create List` | --proxy=192.168.132.104:8888 | |
| `Create Webdriver` | PhantomJS | service_args=${service args} | |
Returns the index of this browser instance which can be used later to
switch back to it. Index starts from 1 and is reset back to it when
`Close All Browsers` keyword is used. See `Switch Browser` for an
example.
"""
if not isinstance(kwargs, dict):
raise RuntimeError("kwargs must be a dictionary.")
for arg_name in kwargs:
if arg_name in init_kwargs:
raise RuntimeError("Got multiple values for argument '%s'." % arg_name)
init_kwargs[arg_name] = kwargs[arg_name]
driver_name = driver_name.strip()
try:
creation_func = getattr(webdriver, driver_name)
except AttributeError:
raise RuntimeError("'%s' is not a valid WebDriver name." % driver_name)
self.info("Creating an instance of the %s WebDriver." % driver_name)
driver = creation_func(**init_kwargs)
self.debug("Created %s WebDriver instance with session id %s."
% (driver_name, driver.session_id))
driver = self._wrap_event_firing_webdriver(driver)
return self.ctx.register_driver(driver, alias)
def _wrap_event_firing_webdriver(self, driver):
if not self.ctx.event_firing_webdriver:
return driver
self.debug('Wrapping driver to event_firing_webdriver.')
return EventFiringWebDriver(driver, self.ctx.event_firing_webdriver())
@keyword
def switch_browser(self, index_or_alias):
"""Switches between active browsers using ``index_or_alias``.
Indices are returned by the `Open Browser` keyword and aliases can
be given to it explicitly. Indices start from 1.
Example:
| `Open Browser` | http://google.com | ff |
| `Location Should Be` | http://google.com | |
| `Open Browser` | http://yahoo.com | ie | alias=second |
| `Location Should Be` | http://yahoo.com | |
| `Switch Browser` | 1 | # index |
| `Page Should Contain` | I'm feeling lucky | |
| `Switch Browser` | second | # alias |
| `Page Should Contain` | More Yahoo! | |
| `Close All Browsers` | | |
Above example expects that there was no other open browsers when
opening the first one because it used index ``1`` when switching to
it later. If you are not sure about that, you can store the index
into a variable as below.
| ${index} = | `Open Browser` | http://google.com |
| # Do something ... | | |
| `Switch Browser` | ${index} | |
"""
try:
self.drivers.switch(index_or_alias)
except RuntimeError:
raise RuntimeError("No browser with index or alias '%s' found."
% index_or_alias)
self.debug('Switched to browser with Selenium session id %s.'
% self.driver.session_id)
@keyword
def get_browser_ids(self):
"""Returns index of all active browser as list.
Example:
| @{browser_ids}= | Get Browser Ids | | |
| FOR | ${id} | IN | @{browser_ids} |
| | @{window_titles}= | Get Window Titles | browser=${id} |
| | Log | Browser ${id} has these windows: ${window_titles} | |
| END | | | |
See `Switch Browser` for more information and examples.
New in SeleniumLibrary 4.0
"""
return self.drivers.active_driver_ids
@keyword
def get_browser_aliases(self):
"""Returns aliases of all active browser that has an alias as NormalizedDict.
The dictionary contains the aliases as keys and the index as value.
This can be accessed as dictionary ``${aliases.key}`` or as list ``@{aliases}[0]``.
Example:
| `Open Browser` | https://example.com | alias=BrowserA | |
| `Open Browser` | https://example.com | alias=BrowserB | |
| &{aliases} | `Get Browser Aliases` | | # &{aliases} = { BrowserA=1|BrowserB=2 } |
| `Log` | ${aliases.BrowserA} | | # logs ``1`` |
| FOR | ${alias} | IN | @{aliases} |
| | `Log` | ${alias} | # logs ``BrowserA`` and ``BrowserB`` |
| END | | | |
See `Switch Browser` for more information and examples.
New in SeleniumLibrary 4.0
"""
return self.drivers.active_aliases
@keyword
def get_session_id(self):
"""Returns the currently active browser session id.
New in SeleniumLibrary 3.2
"""
return self.driver.session_id
@keyword
def get_source(self):
"""Returns the entire HTML source of the current page or frame."""
return self.driver.page_source
@keyword
def get_title(self):
"""Returns the title of the current page."""
return self.driver.title
@keyword
def get_location(self):
"""Returns the current browser window URL."""
return self.driver.current_url
@keyword
def location_should_be(self, url, message=None):
"""Verifies that the current URL is exactly ``url``.
The ``url`` argument contains the exact url that should exist in browser.
The ``message`` argument can be used to override the default error
message.
``message`` argument is new in SeleniumLibrary 3.2.0.
"""
actual = self.get_location()
if actual != url:
if is_noney(message):
message = ("Location should have been '%s' but "
"was '%s'." % (url, actual))
raise AssertionError(message)
self.info("Current location is '%s'." % url)
@keyword
def location_should_contain(self, expected, message=None):
"""Verifies that the current URL contains ``expected``.
The ``expected`` argument contains the expected value in url.
The ``message`` argument can be used to override the default error
message.
``message`` argument is new in SeleniumLibrary 3.2.0.
"""
actual = self.get_location()
if expected not in actual:
if is_noney(message):
message = ("Location should have contained '%s' but "
"it was '%s'." % (expected, actual))
raise AssertionError(message)
self.info("Current location contains '%s'." % expected)
@keyword
def log_location(self):
"""Logs and returns the current browser window URL."""
url = self.get_location()
self.info(url)
return url
@keyword
def log_source(self, loglevel='INFO'):
"""Logs and returns the HTML source of the current page or frame.
The ``loglevel`` argument defines the used log level. Valid log
levels are ``WARN``, ``INFO`` (default), ``DEBUG``, ``TRACE``
and ``NONE`` (no logging).
"""
source = self.get_source()
self.log(source, loglevel)
return source
@keyword
def log_title(self):
"""Logs and returns the title of the current page."""
title = self.get_title()
self.info(title)
return title
@keyword
def title_should_be(self, title, message=None):
"""Verifies that the current page title equals ``title``.
The ``message`` argument can be used to override the default error
message.
``message`` argument is new in SeleniumLibrary 3.1.
"""
actual = self.get_title()
if actual != title:
if is_noney(message):
message = "Title should have been '%s' but was '%s'." % (title, actual)
raise AssertionError(message)
self.info("Page title is '%s'." % title)
@keyword
def go_back(self):
"""Simulates the user clicking the back button on their browser."""
self.driver.back()
@keyword
def go_to(self, url):
"""Navigates the current browser window to the provided ``url``."""
self.info("Opening url '%s'" % url)
self.driver.get(url)
@keyword
def reload_page(self):
"""Simulates user reloading page."""
self.driver.refresh()
@keyword
def get_selenium_speed(self):
"""Gets the delay that is waited after each Selenium command.
The value is returned as a human-readable string like ``1 second``.
See the `Selenium Speed` section above for more information.
"""
return secs_to_timestr(self.ctx.speed)
@keyword
def get_selenium_timeout(self):
"""Gets the timeout that is used by various keywords.
The value is returned as a human-readable string like ``1 second``.
See the `Timeout` section above for more information.
"""
return secs_to_timestr(self.ctx.timeout)
@keyword
def get_selenium_implicit_wait(self):
"""Gets the implicit wait value used by Selenium.
The value is returned as a human-readable string like ``1 second``.
See the `Implicit wait` section above for more information.
"""
return secs_to_timestr(self.ctx.implicit_wait)
@keyword
def set_selenium_speed(self, value):
"""Sets the delay that is waited after each Selenium command.
The value can be given as a number that is considered to be
seconds or as a human-readable string like ``1 second``.
The previous value is returned and can be used to restore
the original value later if needed.
See the `Selenium Speed` section above for more information.
Example:
| `Set Selenium Speed` | 0.5 seconds |
"""
old_speed = self.get_selenium_speed()
self.ctx.speed = timestr_to_secs(value)
for driver in self.drivers.active_drivers:
self._monkey_patch_speed(driver)
return old_speed
@keyword
def set_selenium_timeout(self, value):
"""Sets the timeout that is used by various keywords.
The value can be given as a number that is considered to be
seconds or as a human-readable string like ``1 second``.
The previous value is returned and can be used to restore
the original value later if needed.
See the `Timeout` section above for more information.
Example:
| ${orig timeout} = | `Set Selenium Timeout` | 15 seconds |
| `Open page that loads slowly` |
| `Set Selenium Timeout` | ${orig timeout} |
"""
old_timeout = self.get_selenium_timeout()
self.ctx.timeout = timestr_to_secs(value)
for driver in self.drivers.active_drivers:
driver.set_script_timeout(self.ctx.timeout)
return old_timeout
@keyword
def set_selenium_implicit_wait(self, value):
"""Sets the implicit wait value used by Selenium.
The value can be given as a number that is considered to be
seconds or as a human-readable string like ``1 second``.
The previous value is returned and can be used to restore
the original value later if needed.
This keyword sets the implicit wait for all opened browsers.
Use `Set Browser Implicit Wait` to set it only to the current
browser.
See the `Implicit wait` section above for more information.
Example:
| ${orig wait} = | `Set Selenium Implicit Wait` | 10 seconds |
| `Perform AJAX call that is slow` |
| `Set Selenium Implicit Wait` | ${orig wait} |
"""
old_wait = self.get_selenium_implicit_wait()
self.ctx.implicit_wait = timestr_to_secs(value)
for driver in self.drivers.active_drivers:
driver.implicitly_wait(self.ctx.implicit_wait)
return old_wait
@keyword
def set_browser_implicit_wait(self, value):
"""Sets the implicit wait value used by Selenium.
Same as `Set Selenium Implicit Wait` but only affects the current
browser.
"""
self.driver.implicitly_wait(timestr_to_secs(value))
def _make_driver(self, browser, desired_capabilities=None, profile_dir=None,
remote=None, options=None, service_log_path=None):
driver = WebDriverCreator(self.log_dir).create_driver(
browser=browser, desired_capabilities=desired_capabilities, remote_url=remote,
profile_dir=profile_dir, options=options, service_log_path=service_log_path)
driver.set_script_timeout(self.ctx.timeout)
driver.implicitly_wait(self.ctx.implicit_wait)
if self.ctx.speed:
self._monkey_patch_speed(driver)
return driver
def _monkey_patch_speed(self, driver):
def execute(self, driver_command, params=None):
result = self._base_execute(driver_command, params)
speed = self._speed if hasattr(self, '_speed') else 0.0
if speed > 0:
time.sleep(speed)
return result
if not hasattr(driver, '_base_execute'):
driver._base_execute = driver.execute
driver.execute = types.MethodType(execute, driver)
driver._speed = self.ctx.speed
|
py | b411cb9bdcb951cd4466411e39f14be475ccd04a | """
Analytical template tags and filters.
"""
from __future__ import absolute_import
import logging
from django import template
from django.template import Node, TemplateSyntaxError
from importlib import import_module
from analytical.utils import AnalyticalException
TAG_LOCATIONS = ['head_top', 'head_bottom', 'body_top', 'body_bottom']
TAG_POSITIONS = ['first', None, 'last']
TAG_MODULES = [
'analytical.chartbeat',
'analytical.clickmap',
'analytical.clicky',
'analytical.crazy_egg',
'analytical.facebook_pixel',
'analytical.gauges',
'analytical.google_analytics',
'analytical.google_analytics_js',
'analytical.google_analytics_gtag',
'analytical.gosquared',
'analytical.hotjar',
'analytical.hubspot',
'analytical.intercom',
'analytical.kiss_insights',
'analytical.kiss_metrics',
'analytical.matomo',
'analytical.mixpanel',
'analytical.olark',
'analytical.optimizely',
'analytical.performable',
'analytical.piwik',
'analytical.rating_mailru',
'analytical.snapengage',
'analytical.spring_metrics',
'analytical.uservoice',
'analytical.woopra',
'analytical.yandex_metrica',
]
logger = logging.getLogger(__name__)
register = template.Library()
def _location_tag(location):
def analytical_tag(parser, token):
bits = token.split_contents()
if len(bits) > 1:
raise TemplateSyntaxError("'%s' tag takes no arguments" % bits[0])
return AnalyticalNode(location)
return analytical_tag
for loc in TAG_LOCATIONS:
register.tag('analytical_%s' % loc, _location_tag(loc))
class AnalyticalNode(Node):
def __init__(self, location):
self.nodes = [node_cls() for node_cls in template_nodes[location]]
def render(self, context):
return "".join([node.render(context) for node in self.nodes])
def _load_template_nodes():
template_nodes = dict((loc, dict((pos, []) for pos in TAG_POSITIONS))
for loc in TAG_LOCATIONS)
def add_node_cls(location, node, position=None):
template_nodes[location][position].append(node)
for path in TAG_MODULES:
module = _import_tag_module(path)
try:
module.contribute_to_analytical(add_node_cls)
except AnalyticalException as e:
logger.debug("not loading tags from '%s': %s", path, e)
for location in TAG_LOCATIONS:
template_nodes[location] = sum((template_nodes[location][p]
for p in TAG_POSITIONS), [])
return template_nodes
def _import_tag_module(path):
app_name, lib_name = path.rsplit('.', 1)
return import_module("%s.templatetags.%s" % (app_name, lib_name))
template_nodes = _load_template_nodes()
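# Illustrative usage sketch (not part of the original module): once 'analytical' is in
# INSTALLED_APPS, the location tags registered above are used from a Django template
# roughly like this:
#
#   {% load analytical %}
#   <html>
#     <head>{% analytical_head_top %} ... {% analytical_head_bottom %}</head>
#     <body>{% analytical_body_top %} ... {% analytical_body_bottom %}</body>
#   </html>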
|
py | b411cba531b9cbcf89cb314ec8d98ff1ec807b96 | # Copyright (c) 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from neutron.agent.common import ovs_lib
from neutron.agent.linux import ip_lib
from neutron.tests.common import net_helpers
from neutron.tests.functional.agent.linux import base
class OVSBridgeTestCase(base.BaseOVSLinuxTestCase):
# TODO(twilson) So far, only ovsdb-related tests are written. It would be
# good to also add the openflow-related functions
def setUp(self):
super(OVSBridgeTestCase, self).setUp()
self.ovs = ovs_lib.BaseOVS()
self.br = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
def create_ovs_port(self, *interface_attrs):
# Convert ((a, b), (c, d)) to {a: b, c: d} and add 'type' by default
attrs = collections.OrderedDict(interface_attrs)
attrs.setdefault('type', 'internal')
port_name = net_helpers.get_rand_port_name()
return (port_name, self.br.add_port(port_name, *attrs.items()))
def create_ovs_vif_port(self, iface_id=None, mac=None,
iface_field='iface-id'):
if iface_id is None:
iface_id = base.get_rand_name()
if mac is None:
mac = base.get_rand_name()
attrs = ('external_ids', {iface_field: iface_id, 'attached-mac': mac})
port_name, ofport = self.create_ovs_port(attrs)
return ovs_lib.VifPort(port_name, ofport, iface_id, mac, self.br)
def test_port_lifecycle(self):
(port_name, ofport) = self.create_ovs_port(('type', 'internal'))
# ofport should always be an integer string with value -1 or > 0.
self.assertTrue(int(ofport))
self.assertTrue(int(self.br.get_port_ofport(port_name)))
self.assertTrue(self.br.port_exists(port_name))
self.assertEqual(self.br.br_name,
self.br.get_bridge_for_iface(port_name))
self.br.delete_port(port_name)
self.assertFalse(self.br.port_exists(port_name))
def test_duplicate_port_may_exist_false(self):
port_name, ofport = self.create_ovs_port(('type', 'internal'))
cmd = self.br.ovsdb.add_port(self.br.br_name,
port_name, may_exist=False)
self.assertRaises(RuntimeError, cmd.execute, check_error=True)
def test_delete_port_if_exists_false(self):
cmd = self.br.ovsdb.del_port('nonexistantport', if_exists=False)
self.assertRaises(RuntimeError, cmd.execute, check_error=True)
def test_replace_port(self):
port_name = net_helpers.get_rand_port_name()
self.br.replace_port(port_name, ('type', 'internal'))
self.assertTrue(self.br.port_exists(port_name))
self.assertEqual('internal',
self.br.db_get_val('Interface', port_name, 'type'))
self.br.replace_port(port_name, ('type', 'internal'),
('external_ids', {'test': 'test'}))
self.assertTrue(self.br.port_exists(port_name))
self.assertEqual('test', self.br.db_get_val('Interface', port_name,
'external_ids')['test'])
def test_attribute_lifecycle(self):
(port_name, ofport) = self.create_ovs_port()
tag = 42
self.ovs.set_db_attribute('Port', port_name, 'tag', tag)
self.assertEqual(tag, self.ovs.db_get_val('Port', port_name, 'tag'))
self.assertEqual(tag, self.br.get_port_tag_dict()[port_name])
self.ovs.clear_db_attribute('Port', port_name, 'tag')
self.assertEqual(self.ovs.db_get_val('Port', port_name, 'tag'), [])
self.assertEqual(self.br.get_port_tag_dict()[port_name], [])
def test_get_bridge_external_bridge_id(self):
self.ovs.set_db_attribute('Bridge', self.br.br_name,
'external_ids',
{'bridge-id': self.br.br_name})
self.assertEqual(
self.br.br_name,
self.ovs.get_bridge_external_bridge_id(self.br.br_name))
def test_controller_lifecycle(self):
controllers = {'tcp:127.0.0.1:6633', 'tcp:172.17.16.10:55'}
self.br.set_controller(controllers)
self.assertSetEqual(controllers, set(self.br.get_controller()))
self.br.del_controller()
self.assertEqual([], self.br.get_controller())
def test_set_fail_mode(self):
self.br.set_secure_mode()
self._assert_br_fail_mode(ovs_lib.FAILMODE_SECURE)
def _assert_br_fail_mode(self, fail_mode):
self.assertEqual(
self.br.db_get_val('Bridge', self.br.br_name, 'fail_mode'),
fail_mode)
def test_set_protocols(self):
self.br.set_protocols('OpenFlow10')
self.assertEqual(
self.br.db_get_val('Bridge', self.br.br_name, 'protocols'),
"OpenFlow10")
def test_get_datapath_id(self):
brdev = ip_lib.IPDevice(self.br.br_name)
dpid = brdev.link.attributes['link/ether'].replace(':', '')
self.br.set_db_attribute('Bridge',
self.br.br_name, 'datapath_id', dpid)
self.assertIn(dpid, self.br.get_datapath_id())
def test_add_tunnel_port(self):
attrs = {
'remote_ip': '192.0.2.1', # RFC 5737 TEST-NET-1
'local_ip': '198.51.100.1', # RFC 5737 TEST-NET-2
}
port_name = net_helpers.get_rand_port_name()
self.br.add_tunnel_port(port_name, attrs['remote_ip'],
attrs['local_ip'])
self.assertEqual(self.ovs.db_get_val('Interface', port_name, 'type'),
'gre')
options = self.ovs.db_get_val('Interface', port_name, 'options')
for attr, val in attrs.items():
self.assertEqual(val, options[attr])
def test_add_patch_port(self):
local = net_helpers.get_rand_port_name()
peer = 'remotepeer'
self.br.add_patch_port(local, peer)
self.assertEqual(self.ovs.db_get_val('Interface', local, 'type'),
'patch')
options = self.ovs.db_get_val('Interface', local, 'options')
self.assertEqual(peer, options['peer'])
def test_get_port_name_list(self):
# Note that ovs-vsctl's list-ports does not include the port created
# with the same name as the bridge
ports = {self.create_ovs_port()[0] for i in range(5)}
self.assertSetEqual(ports, set(self.br.get_port_name_list()))
def test_get_port_stats(self):
# Nothing seems to use this function?
(port_name, ofport) = self.create_ovs_port()
stats = set(self.br.get_port_stats(port_name).keys())
self.assertTrue(set(['rx_packets', 'tx_packets']).issubset(stats))
def test_get_vif_ports(self):
for i in range(2):
self.create_ovs_port()
vif_ports = [self.create_ovs_vif_port() for i in range(3)]
ports = self.br.get_vif_ports()
self.assertEqual(3, len(ports))
self.assertTrue(all([isinstance(x, ovs_lib.VifPort) for x in ports]))
self.assertEqual(sorted([x.port_name for x in vif_ports]),
sorted([x.port_name for x in ports]))
def test_get_vif_port_set(self):
for i in range(2):
self.create_ovs_port()
vif_ports = [self.create_ovs_vif_port() for i in range(2)]
ports = self.br.get_vif_port_set()
expected = set([x.vif_id for x in vif_ports])
self.assertEqual(expected, ports)
def test_get_port_tag_dict(self):
# Simple case tested in port test_set_get_clear_db_val
pass
def test_get_vif_port_by_id(self):
for i in range(2):
self.create_ovs_port()
vif_ports = [self.create_ovs_vif_port() for i in range(3)]
for vif in vif_ports:
self.assertEqual(self.br.get_vif_port_by_id(vif.vif_id).vif_id,
vif.vif_id)
def test_delete_ports(self):
# TODO(twilson) I intensely dislike the current delete_ports function
        # as the default behavior is really delete_vif_ports(); it only acts
        # like a true delete_ports() should when all_ports=True is passed
# Create 2 non-vif ports and 2 vif ports
nonvifs = {self.create_ovs_port()[0] for i in range(2)}
vifs = {self.create_ovs_vif_port().port_name for i in range(2)}
self.assertSetEqual(nonvifs.union(vifs),
set(self.br.get_port_name_list()))
self.br.delete_ports()
self.assertSetEqual(nonvifs, set(self.br.get_port_name_list()))
self.br.delete_ports(all_ports=True)
self.assertEqual(len(self.br.get_port_name_list()), 0)
def test_reset_bridge(self):
self.create_ovs_port()
self.br.reset_bridge()
self.assertEqual(len(self.br.get_port_name_list()), 0)
self._assert_br_fail_mode([])
def test_reset_bridge_secure_mode(self):
self.br.reset_bridge(secure_mode=True)
self._assert_br_fail_mode(ovs_lib.FAILMODE_SECURE)
class OVSLibTestCase(base.BaseOVSLinuxTestCase):
def setUp(self):
super(OVSLibTestCase, self).setUp()
self.ovs = ovs_lib.BaseOVS()
def test_bridge_lifecycle_baseovs(self):
name = base.get_rand_name(prefix=net_helpers.BR_PREFIX)
self.addCleanup(self.ovs.delete_bridge, name)
br = self.ovs.add_bridge(name)
self.assertEqual(br.br_name, name)
self.assertTrue(self.ovs.bridge_exists(name))
self.ovs.delete_bridge(name)
self.assertFalse(self.ovs.bridge_exists(name))
def test_get_bridges(self):
bridges = {
self.useFixture(net_helpers.OVSBridgeFixture()).bridge.br_name
for i in range(5)}
self.assertTrue(set(self.ovs.get_bridges()).issuperset(bridges))
def test_bridge_lifecycle_ovsbridge(self):
name = base.get_rand_name(prefix=net_helpers.BR_PREFIX)
br = ovs_lib.OVSBridge(name)
self.assertEqual(br.br_name, name)
# Make sure that instantiating an OVSBridge does not actually create
self.assertFalse(self.ovs.bridge_exists(name))
self.addCleanup(self.ovs.delete_bridge, name)
br.create()
self.assertTrue(self.ovs.bridge_exists(name))
br.destroy()
self.assertFalse(self.ovs.bridge_exists(name))
|
py | b411cba88d1ad2f6321ad42a9ce99b35feb35edc | # USAGE
# python detect_drowsiness.py --shape-predictor shape_predictor_68_face_landmarks.dat
# python detect_drowsiness.py --shape-predictor shape_predictor_68_face_landmarks.dat --alarm alarm.wav
from pynput.keyboard import Key, Controller
from scipy.spatial import distance as dist
from imutils.video import VideoStream
from imutils import face_utils
from threading import Thread
import webbrowser as wb
import numpy as np
import argparse
import imutils
import time
import dlib
import cv2
keyboard = Controller()
def mouth_aspect_ratio(mouth):
A = dist.euclidean(mouth[2], mouth[10]) # 51, 59
B = dist.euclidean(mouth[4], mouth[8]) # 53, 57
C = dist.euclidean(mouth[0], mouth[6]) # 49, 55
mar = (A + B) / (2.0 * C)
return mar
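# The mouth aspect ratio (MAR) above is the mean of the two vertical lip distances (A, B)
# divided by the horizontal mouth width (C); it grows as the mouth opens, so comparing it
# against MOUTH_AR_THRESH below gives a simple open-mouth detector.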
# construct the argument parse and parse the arguments
'''ap = argparse.ArgumentParser()
ap.add_argument("-p", "--shape-predictor", required=False, default='shape_predictor_68_face_landmarks.dat',
help="path to facial landmark predictor")
ap.add_argument("-w", "--webcam", type=int, default=0,
help="index of webcam on system")
args = vars(ap.parse_args())
'''
# define one constant: the mouth aspect ratio threshold used to indicate an open mouth
MOUTH_AR_THRESH = 0.761
print("[INFO] loading facial landmark predictor...")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
# grab the indexes of the facial landmarks for the mouth
(mStart, mEnd) = (49, 68)
# start the video stream thread
print("[INFO] starting video stream thread...")
vs = VideoStream(src=0).start()
time.sleep(1.0)
frame_width = 640
frame_height = 360
wb.open_new("chrome://dino")
# loop over frames from the video stream
while True:
start = time.time()
frame = vs.read()
frame = imutils.resize(frame, width=500)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
rects = detector(gray, 0)
for rect in rects:
shape = predictor(gray, rect)
shape = face_utils.shape_to_np(shape)
mouth = shape[mStart:mEnd]
mouthMAR = mouth_aspect_ratio(mouth)
mar = mouthMAR
mouthHull = cv2.convexHull(mouth)
cv2.drawContours(frame, [mouthHull], -1, (0, 255, 0), 1)
cv2.putText(frame, "MAR: {:.2f}".format(mar), (30, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
if mar > MOUTH_AR_THRESH:
keyboard.press(Key.space)
cv2.putText(frame, "Mouth is Open!", (30,60),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,255),2)
print("[DATA] Original MAR is {:.5f}, Duration : {:.5f}".format(mar,time.time() - start))
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
cv2.destroyAllWindows()
vs.stop()
|
py | b411cd7114de73121d3eaff066a2b256b6df28a4 | """
Class of n-link arm in 3D
Author: Takayuki Murooka (takayuki5168)
"""
import numpy as np
import math
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
class Link:
def __init__(self, dh_params):
self.dh_params_ = dh_params
def transformation_matrix(self):
theta = self.dh_params_[0]
alpha = self.dh_params_[1]
a = self.dh_params_[2]
d = self.dh_params_[3]
st = math.sin(theta)
ct = math.cos(theta)
sa = math.sin(alpha)
ca = math.cos(alpha)
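        # The matrix below is the standard Denavit-Hartenberg link transform,
        # i.e. Rot_z(theta) * Trans_z(d) * Trans_x(a) * Rot_x(alpha).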
trans = np.array([[ct, -st * ca, st * sa, a * ct],
[st, ct * ca, -ct * sa, a * st],
[0, sa, ca, d],
[0, 0, 0, 1]])
return trans
@staticmethod
def basic_jacobian(trans_prev, ee_pos):
pos_prev = np.array(
[trans_prev[0, 3], trans_prev[1, 3], trans_prev[2, 3]])
z_axis_prev = np.array(
[trans_prev[0, 2], trans_prev[1, 2], trans_prev[2, 2]])
basic_jacobian = np.hstack(
(np.cross(z_axis_prev, ee_pos - pos_prev), z_axis_prev))
return basic_jacobian
class NLinkArm:
def __init__(self, dh_params_list):
self.fig = plt.figure()
self.ax = Axes3D(self.fig)
self.link_list = []
for i in range(len(dh_params_list)):
self.link_list.append(Link(dh_params_list[i]))
def transformation_matrix(self):
trans = np.identity(4)
for i in range(len(self.link_list)):
trans = np.dot(trans, self.link_list[i].transformation_matrix())
return trans
def forward_kinematics(self, plot=False):
trans = self.transformation_matrix()
x = trans[0, 3]
y = trans[1, 3]
z = trans[2, 3]
alpha, beta, gamma = self.euler_angle()
if plot:
x_list = []
y_list = []
z_list = []
trans = np.identity(4)
x_list.append(trans[0, 3])
y_list.append(trans[1, 3])
z_list.append(trans[2, 3])
for i in range(len(self.link_list)):
trans = np.dot(trans, self.link_list[i].transformation_matrix())
x_list.append(trans[0, 3])
y_list.append(trans[1, 3])
z_list.append(trans[2, 3])
self.ax.plot(x_list, y_list, z_list, "o-", color="#00aa00", ms=4,
mew=0.5)
self.ax.plot([0], [0], [0], "o")
self.ax.set_xlim(-1, 1)
self.ax.set_ylim(-1, 1)
self.ax.set_zlim(-1, 1)
plt.show()
return [x, y, z, alpha, beta, gamma]
def basic_jacobian(self):
ee_pos = self.forward_kinematics()[0:3]
basic_jacobian_mat = []
trans = np.identity(4)
for i in range(len(self.link_list)):
basic_jacobian_mat.append(
self.link_list[i].basic_jacobian(trans, ee_pos))
trans = np.dot(trans, self.link_list[i].transformation_matrix())
return np.array(basic_jacobian_mat).T
def inverse_kinematics(self, ref_ee_pose, plot=False):
for cnt in range(500):
ee_pose = self.forward_kinematics()
diff_pose = np.array(ref_ee_pose) - ee_pose
basic_jacobian_mat = self.basic_jacobian()
alpha, beta, gamma = self.euler_angle()
K_zyz = np.array(
[[0, -math.sin(alpha), math.cos(alpha) * math.sin(beta)],
[0, math.cos(alpha), math.sin(alpha) * math.sin(beta)],
[1, 0, math.cos(beta)]])
K_alpha = np.identity(6)
K_alpha[3:, 3:] = K_zyz
theta_dot = np.dot(
np.dot(np.linalg.pinv(basic_jacobian_mat), K_alpha),
np.array(diff_pose))
self.update_joint_angles(theta_dot / 100.)
if plot:
self.fig = plt.figure()
self.ax = Axes3D(self.fig)
x_list = []
y_list = []
z_list = []
trans = np.identity(4)
x_list.append(trans[0, 3])
y_list.append(trans[1, 3])
z_list.append(trans[2, 3])
for i in range(len(self.link_list)):
trans = np.dot(trans, self.link_list[i].transformation_matrix())
x_list.append(trans[0, 3])
y_list.append(trans[1, 3])
z_list.append(trans[2, 3])
self.ax.plot(x_list, y_list, z_list, "o-", color="#00aa00", ms=4,
mew=0.5)
self.ax.plot([0], [0], [0], "o")
self.ax.set_xlim(-1, 1)
self.ax.set_ylim(-1, 1)
self.ax.set_zlim(-1, 1)
self.ax.plot([ref_ee_pose[0]], [ref_ee_pose[1]], [ref_ee_pose[2]],
"o")
plt.show()
def euler_angle(self):
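        # Extracts ZYZ Euler angles (alpha, beta, gamma) of the end-effector
        # orientation from the 4x4 forward-kinematics transform.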
trans = self.transformation_matrix()
alpha = math.atan2(trans[1][2], trans[0][2])
if not (-math.pi / 2 <= alpha <= math.pi / 2):
alpha = math.atan2(trans[1][2], trans[0][2]) + math.pi
if not (-math.pi / 2 <= alpha <= math.pi / 2):
alpha = math.atan2(trans[1][2], trans[0][2]) - math.pi
beta = math.atan2(
trans[0][2] * math.cos(alpha) + trans[1][2] * math.sin(alpha),
trans[2][2])
gamma = math.atan2(
-trans[0][0] * math.sin(alpha) + trans[1][0] * math.cos(alpha),
-trans[0][1] * math.sin(alpha) + trans[1][1] * math.cos(alpha))
return alpha, beta, gamma
def set_joint_angles(self, joint_angle_list):
for i in range(len(self.link_list)):
self.link_list[i].dh_params_[0] = joint_angle_list[i]
def update_joint_angles(self, diff_joint_angle_list):
for i in range(len(self.link_list)):
self.link_list[i].dh_params_[0] += diff_joint_angle_list[i]
def plot(self):
self.fig = plt.figure()
self.ax = Axes3D(self.fig)
x_list = []
y_list = []
z_list = []
trans = np.identity(4)
x_list.append(trans[0, 3])
y_list.append(trans[1, 3])
z_list.append(trans[2, 3])
for i in range(len(self.link_list)):
trans = np.dot(trans, self.link_list[i].transformation_matrix())
x_list.append(trans[0, 3])
y_list.append(trans[1, 3])
z_list.append(trans[2, 3])
self.ax.plot(x_list, y_list, z_list, "o-", color="#00aa00", ms=4,
mew=0.5)
self.ax.plot([0], [0], [0], "o")
self.ax.set_xlabel("x")
self.ax.set_ylabel("y")
self.ax.set_zlabel("z")
self.ax.set_xlim(-1, 1)
self.ax.set_ylim(-1, 1)
self.ax.set_zlim(-1, 1)
plt.show()
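# Minimal usage sketch (illustrative DH values, not part of the original file).
# Each row of dh_params_list is [theta, alpha, a, d], matching Link.transformation_matrix.
#
#   arm = NLinkArm([[0., math.pi / 2., 0., .1],
#                   [math.pi / 2., 0., .2, 0.],
#                   [0., 0., .2, 0.]])
#   print(arm.forward_kinematics())              # [x, y, z, alpha, beta, gamma]
#   arm.inverse_kinematics([0.2, 0.1, 0.2, 0., 0., 0.])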
|
py | b411ced5e861ec3e081468b335bbdec36390ff97 | from typing import *
class OrderedStream:
def __init__(self, n: int):
self.stream = [None] * (n + 1)
self.ptr = 1
def insert(self, id: int, value: str) -> List[str]:
self.stream[id] = value
        if self.stream[self.ptr] is not None:
end = self.ptr + 1
for i in range(self.ptr + 1, len(self.stream)):
if self.stream[i]:
end += 1
else:
break
ans = self.stream[self.ptr:end]
self.ptr = end
return ans
else:
return []
# Your OrderedStream object will be instantiated and called as such:
# obj = OrderedStream(n)
# param_1 = obj.insert(id,value)
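# Worked example (traced through insert() above):
#   os = OrderedStream(5)
#   os.insert(3, "ccccc")  -> []
#   os.insert(1, "aaaaa")  -> ["aaaaa"]
#   os.insert(2, "bbbbb")  -> ["bbbbb", "ccccc"]
#   os.insert(5, "eeeee")  -> []
#   os.insert(4, "ddddd")  -> ["ddddd", "eeeee"]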
|
py | b411d11ba3469a594b196c6146c5df629b9aac94 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Given a collection of candidate numbers (C) and a target number (T),
find all unique combinations in C where the candidate numbers sum to T.
**Each number in C may only be used once in the combination.**
Note:
All numbers (including target) will be positive integers.
Elements in a combination (a1, a2, … , ak) must be in non-descending order.
(ie, a1 ≤ a2 ≤ … ≤ ak).
The solution set must not contain duplicate combinations.
For example, given candidate set 10,1,2,7,6,1,5 and target 8,
A solution set is:
[1, 7]
[1, 2, 5]
[2, 6]
[1, 1, 6]
'''
class Solution(object):
def combinationSum2(self, candidates, target):
"""
:type candidates: List[int]
:type target: int
:rtype: List[List[int]]
"""
results = []
result = []
candidates.sort()
self.dfs(candidates, target, results, result, 0)
return results
def dfs(self, nums, target, results, result, index):
if target == 0:
results.append(result)
return
i = index
while i < len(nums):
if nums[i] > target:
break
self.dfs(nums, target-nums[i], results, result+[nums[i]], i+1)
            # skip duplicates: this depth has already tried the same value
while i + 1 < len(nums) and nums[i] == nums[i+1]:
i += 1
i += 1
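# Example (matches the docstring above):
#   Solution().combinationSum2([10, 1, 2, 7, 6, 1, 5], 8)
#   -> [[1, 1, 6], [1, 2, 5], [1, 7], [2, 6]]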
|
py | b411d12b21b4bafed8758e6b3fbbf6fb33916ecc | from dataclasses import dataclass
from datetime import datetime
from typing import List, Optional
@dataclass(frozen=True)
class SessionInfo:
"""The session information for a paper."""
session_name: str
start_time: datetime
end_time: datetime
zoom_link: str
@property
def time_string(self) -> str:
return "({}-{} GMT)".format(
self.start_time.strftime("%H:%M"), self.end_time.strftime("%H:%M")
)
@property
def session(self) -> str:
start_day = self.start_time.strftime("%a")
if self.session_name.startswith("D"):
# demo sessions
return f"Demo Session {self.session_name[1:]} {start_day}"
return f"Session {self.session_name} {start_day}"
@dataclass(frozen=True)
class PaperContent:
"""The content of a paper.
Needs to be synced with static/js/papers.js and static/js/paper_vis.js.
"""
# needs to be synced with
title: str
authors: List[str]
track: str
paper_type: str
abstract: str
tldr: str
keywords: List[str]
pdf_url: Optional[str]
demo_url: Optional[str]
sessions: List[SessionInfo]
similar_paper_uids: List[str]
def __post_init__(self):
assert self.track, self
if self.pdf_url:
assert self.pdf_url.startswith("https://"), self.pdf_url
if self.demo_url:
assert self.demo_url.startswith("https://"), self.demo_url
assert self.paper_type[0].isupper(), self
@dataclass(frozen=True)
class Paper:
"""The paper dataclass.
This corresponds to an entry in the `papers.json`.
See the `start()` method in static/js/papers.js.
"""
id: str
forum: str
card_image_path: str
presentation_id: str
content: PaperContent
@property
def rocketchat_channel(self) -> str:
return f"paper-{self.id.replace('.', '-')}"
@dataclass(frozen=True)
class PlenarySession:
id: str
title: str
image: str
date: str
day: str
time: Optional[str]
speaker: Optional[str]
institution: Optional[str]
abstract: Optional[str]
bio: Optional[str]
# SlidesLive presentation ID
presentation_id: Optional[str]
rocketchat_channel: Optional[str]
qa_time: Optional[str]
zoom_link: Optional[str]
@dataclass(frozen=True)
class CommitteeMember:
role: str
name: str
aff: str
im: Optional[str]
tw: Optional[str]
@dataclass(frozen=True)
class Tutorial:
id: str
title: str
organizers: List[str]
abstract: str
material: str
prerecorded: Optional[str]
livestream: Optional[str]
zoom_link: Optional[str]
session1_time: Optional[str]
session2_time: Optional[str]
virtual_format_description: str
@dataclass(frozen=True)
class WorkshopPaper:
id: str
title: str
speakers: str
presentation_id: Optional[str]
@dataclass(frozen=True)
class Workshop:
id: str
title: str
organizers: List[str]
abstract: str
material: str
livestream: Optional[str]
papers: List[WorkshopPaper]
|
py | b411d27c5efaa2e4d17a89e571b54fdfe83b4cec | from selenium.webdriver.common.by import By
from Tests.pages.page import Page
import time
class MainPage(Page):
@property
def add_button(self):
return self.driver.find_element_by_css_selector("a[href = './?go=add']")
@property
def movie_for_delete(self):
return self.driver.find_element_by_css_selector('div[id^="movie"]')
@property
def no_movies(self):
if self.driver.find_element_by_id('results').text == 'No movies where found.':
time.sleep(5)
self.driver.quit()
@property
def remove_button(self):
return self.driver.find_element_by_css_selector('a[onclick^="if"]')
@property
def alert_when_delete(self):
self.driver.switch_to_alert().accept()
@property
def search_field(self):
return self.driver.find_element_by_id('q')
@property
def is_this_found_movie(self):
return self.is_element_visible((By.CSS_SELECTOR,'div[class="title"]'))
@property
def there_is_movie(self):
assert "No movies where found." not in self.driver.page_source |
py | b411d47d85e4023ddfd49781ba225f03bcc0fc28 | expected_output={
"number_of_service_types":15,
"number_of_records_of_types_PTR":15,
"number_of_records_of_types_SRV":15,
"number_of_records_of_types_A":1,
"number_of_records_of_types_AAAA":2,
"number_of_records_of_types_TXT":15
}
|
py | b411d4cd43962f9eca6738f31ee98e5c30321a98 | ''' Frustum PointNets v2 Model.
'''
from __future__ import print_function
import sys
import os
import tensorflow as tf
import numpy as np
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import tf_util
from pointnet_util import pointnet_sa_module, pointnet_sa_module_msg, pointnet_fp_module
from model_util import NUM_HEADING_BIN, NUM_SIZE_CLUSTER, NUM_OBJECT_POINT
from model_util import point_cloud_masking, get_center_regression_net
from model_util import placeholder_inputs, parse_output_to_tensors, get_loss
def get_instance_seg_v2_net(point_cloud, one_hot_vec,
is_training, bn_decay, end_points):
''' 3D instance segmentation PointNet v2 network.
Input:
point_cloud: TF tensor in shape (B,N,4)
frustum point clouds with XYZ and intensity in point channels
XYZs are in frustum coordinate
one_hot_vec: TF tensor in shape (B,3)
length-3 vectors indicating predicted object type
is_training: TF boolean scalar
bn_decay: TF float scalar
end_points: dict
Output:
logits: TF tensor in shape (B,N,2), scores for bkg/clutter and object
end_points: dict
'''
l0_xyz = tf.slice(point_cloud, [0,0,0], [-1,-1,3])
l0_points = tf.slice(point_cloud, [0,0,3], [-1,-1,1])
# Set abstraction layers
l1_xyz, l1_points = pointnet_sa_module_msg(l0_xyz, l0_points,
128, [0.2,0.4,0.8], [32,64,128],
[[32,32,64], [64,64,128], [64,96,128]],
is_training, bn_decay, scope='layer1')
l2_xyz, l2_points = pointnet_sa_module_msg(l1_xyz, l1_points,
32, [0.4,0.8,1.6], [64,64,128],
[[64,64,128], [128,128,256], [128,128,256]],
is_training, bn_decay, scope='layer2')
l3_xyz, l3_points, _ = pointnet_sa_module(l2_xyz, l2_points,
npoint=None, radius=None, nsample=None, mlp=[128,256,1024],
mlp2=None, group_all=True, is_training=is_training,
bn_decay=bn_decay, scope='layer3')
# Feature Propagation layers
l3_points = tf.concat([l3_points, tf.expand_dims(one_hot_vec, 1)], axis=2)
l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points,
[128,128], is_training, bn_decay, scope='fa_layer1')
l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points,
[128,128], is_training, bn_decay, scope='fa_layer2')
l0_points = pointnet_fp_module(l0_xyz, l1_xyz,
tf.concat([l0_xyz,l0_points],axis=-1), l1_points,
[128,128], is_training, bn_decay, scope='fa_layer3')
# FC layers
net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True,
is_training=is_training, scope='conv1d-fc1', bn_decay=bn_decay)
end_points['feats'] = net
net = tf_util.dropout(net, keep_prob=0.7,
is_training=is_training, scope='dp1')
logits = tf_util.conv1d(net, 2, 1,
padding='VALID', activation_fn=None, scope='conv1d-fc2')
return logits, end_points
def get_3d_box_estimation_v2_net(object_point_cloud, one_hot_vec,
is_training, bn_decay, end_points):
''' 3D Box Estimation PointNet v2 network.
Input:
object_point_cloud: TF tensor in shape (B,M,C)
masked point clouds in object coordinate
one_hot_vec: TF tensor in shape (B,3)
length-3 vectors indicating predicted object type
Output:
output: TF tensor in shape (B,3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER*4)
including box centers, heading bin class scores and residuals,
and size cluster scores and residuals
'''
# Gather object points
print("one_hot_vec: ", one_hot_vec)
batch_size = object_point_cloud.get_shape()[0].value
l0_xyz = object_point_cloud
l0_points = None
# Set abstraction layers
l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points,
npoint=128, radius=0.2, nsample=64, mlp=[64,64,128],
mlp2=None, group_all=False,
is_training=is_training, bn_decay=bn_decay, scope='ssg-layer1')
l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points,
npoint=32, radius=0.4, nsample=64, mlp=[128,128,256],
mlp2=None, group_all=False,
is_training=is_training, bn_decay=bn_decay, scope='ssg-layer2')
l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points,
npoint=None, radius=None, nsample=None, mlp=[256,256,512],
mlp2=None, group_all=True,
is_training=is_training, bn_decay=bn_decay, scope='ssg-layer3')
# Fully connected layers
net = tf.reshape(l3_points, [batch_size, -1])
net = tf.concat([net, one_hot_vec], axis=1)
net = tf_util.fully_connected(net, 512, bn=True,
is_training=is_training, scope='fc1', bn_decay=bn_decay)
net = tf_util.fully_connected(net, 256, bn=True,
is_training=is_training, scope='fc2', bn_decay=bn_decay)
# The first 3 numbers: box center coordinates (cx,cy,cz),
# the next NUM_HEADING_BIN*2: heading bin class scores and bin residuals
# next NUM_SIZE_CLUSTER*4: box cluster scores and residuals
output = tf_util.fully_connected(net,
3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER*4, activation_fn=None, scope='fc3')
return output, end_points
def get_model(point_cloud, one_hot_vec, is_training, bn_decay=None):
    ''' Frustum PointNets model. The model predicts 3D object masks and
    amodal bounding boxes for objects in frustum point clouds.
Input:
point_cloud: TF tensor in shape (B,N,4)
frustum point clouds with XYZ and intensity in point channels
XYZs are in frustum coordinate
one_hot_vec: TF tensor in shape (B,3)
length-3 vectors indicating predicted object type
is_training: TF boolean scalar
bn_decay: TF float scalar
Output:
end_points: dict (map from name strings to TF tensors)
'''
end_points = {}
# 3D Instance Segmentation PointNet
logits, end_points = get_instance_seg_v2_net(\
point_cloud, one_hot_vec,
is_training, bn_decay, end_points)
end_points['mask_logits'] = logits
# Masking
# select masked points and translate to masked points' centroid
object_point_cloud_xyz, mask_xyz_mean, end_points = \
point_cloud_masking(point_cloud, logits, end_points)
# T-Net and coordinate translation
center_delta, end_points = get_center_regression_net(\
object_point_cloud_xyz, one_hot_vec,
is_training, bn_decay, end_points)
stage1_center = center_delta + mask_xyz_mean # Bx3
end_points['stage1_center'] = stage1_center
# Get object point cloud in object coordinate
object_point_cloud_xyz_new = \
object_point_cloud_xyz - tf.expand_dims(center_delta, 1)
    # Amodal Box Estimation PointNet
output, end_points = get_3d_box_estimation_v2_net(\
object_point_cloud_xyz_new, one_hot_vec,
is_training, bn_decay, end_points)
# Parse output to 3D box parameters
end_points = parse_output_to_tensors(output, end_points)
end_points['center'] = end_points['center_boxnet'] + stage1_center # Bx3
return end_points
if __name__=='__main__':
with tf.Graph().as_default():
inputs = tf.zeros((32,1024,4))
outputs = get_model(inputs, tf.ones((32,3)), tf.constant(True))
for key in outputs:
print((key, outputs[key]))
loss = get_loss(tf.zeros((32,1024),dtype=tf.int32),
tf.zeros((32,3)), tf.zeros((32,),dtype=tf.int32),
tf.zeros((32,)), tf.zeros((32,),dtype=tf.int32),
tf.zeros((32,3)), outputs)
print(loss)
|
py | b411d4e79d7eb55e039973668fcc5b35c8750e09 | import sys
import os
import time
import re
import cherrypy
from cherrypy.lib import static
import weblcm_def
import subprocess
from pathlib import Path
from pylibconfig import Config
from threading import Lock
from weblcm_settings import SystemSettingsManage
from syslog import syslog
@cherrypy.expose
class FileManage(object):
''' File Management '''
_lock = Lock()
FILE_MANAGE_SCRIPT='/etc/weblcm-python/scripts/weblcm_files.sh'
#log will be saved in /var/run/log/journal/ for volatile mode, or /var/log/journal/ for persistent mode
#If "/var/run/log/journal/" exists, it should be in volatile mode.
_log_data_dir = "/var/run/log/journal/"
if not os.path.exists("/var/run/log/journal/"):
_log_data_dir = "/var/log/journal/"
def save_file(self, typ, fil):
path = os.path.normpath(os.path.join(weblcm_def.FILEDIR_DICT.get(typ), fil.filename))
with open(path, 'wb+') as out:
while True:
data = fil.file.read(8192)
if not data:
break
out.write(data)
out.close()
return path
def POST(self, *args, **kwargs):
typ = kwargs.get('type', None)
fil = kwargs.get('file', None)
if not typ or not fil:
if not typ:
syslog('FileManage POST - no type specified')
if not fil:
syslog('FileManage POST - no filename provided')
raise cherrypy.HTTPError(400, 'filename or type missing') #bad request
if typ not in weblcm_def.FILEDIR_DICT:
syslog(f'FileManage POST type {typ} unknown')
raise cherrypy.HTTPError(400, f'FileManage POST type {typ} unknown') #bad request
with FileManage._lock:
fp = self.save_file(typ, fil)
if os.path.isfile(fp):
if fp.endswith(".zip"):
password = kwargs.get('password', "")
p = subprocess.Popen([
FileManage.FILE_MANAGE_SCRIPT, typ, "unzip", fp, weblcm_def.FILEDIR_DICT.get(typ), password
])
res = p.wait()
os.remove(fp)
if res:
syslog(f"unzip command file '{fp}' failed with error {res}")
raise cherrypy.HTTPError(500, f'unzip command failed to unzip provided file. Error returned: {res}') #Internal server error
return
syslog(f"unable to obtain FileManage._lock")
raise cherrypy.HTTPError(500, 'unable to obtain internal file lock') #Internal server error
def GET(self, *args, **kwargs):
typ = kwargs.get('type', None)
if not typ:
syslog('FileManage Get - no filename provided')
raise cherrypy.HTTPError(400, 'no filename provided')
fil = '{0}{1}'.format(typ, ".zip")
path = '{0}{1}'.format("/tmp/", fil)
if typ == "config":
password = kwargs.get('password', None)
if not password:
syslog('FileManage Get - no password provided')
raise cherrypy.HTTPError(400, 'no password provided')
p = subprocess.Popen([
FileManage.FILE_MANAGE_SCRIPT, "config", "zip",
weblcm_def.FILEDIR_DICT.get(typ), path, password
])
syslog("Configuration zipped for user")
elif typ == "log":
password = kwargs.get('password', None)
if not password:
syslog('FileManage Get - no password provided')
raise cherrypy.HTTPError(400, 'no password provided')
p = subprocess.Popen([
FileManage.FILE_MANAGE_SCRIPT, "log", "zip",
FileManage._log_data_dir, path, password
])
syslog("System log zipped for user")
elif typ == "debug":
p = subprocess.Popen([
FileManage.FILE_MANAGE_SCRIPT, "debug", "zip",
' '.join([FileManage._log_data_dir, weblcm_def.FILEDIR_DICT.get('config')]),
path, SystemSettingsManage.get_cert_for_file_encryption()
])
syslog("Configuration and system log zipped/encrypted for user")
else:
syslog(f"FileManage GET - unknown file type {typ}")
raise cherrypy.HTTPError(400, f'unknown file type {typ}')
p.wait()
if os.path.isfile(path):
objFile = static.serve_file(path, 'application/x-download', 'attachment', fil)
            os.unlink(path)
            return objFile
syslog(f"Failed to create file {path} for user")
raise cherrypy.HTTPError(500, f'failed to create file {path}')
@cherrypy.tools.json_out()
def DELETE(self, *args, **kwargs):
result = {
'SDCERR': weblcm_def.WEBLCM_ERRORS.get('SDCERR_FAIL'),
'InfoMsg': 'Unable to delete file'
}
typ = kwargs.get('type', None)
fil = kwargs.get('file', None)
if not typ or not fil:
if not typ:
syslog('FileManage DELETE - no type specified')
result['InfoMsg'] = 'no type specified'
if not fil:
syslog('FileManage DELETE - no filename provided')
result['InfoMsg'] = 'no file specified'
# raise cherrypy.HTTPError(400, 'missing type or file') #bad request
return result
valid = ['cert','pac']
if not typ in valid:
# raise cherrypy.HTTPError(400, f"type not one of {valid}")
result['InfoMsg'] = f'type not one of {valid}'
return result
path = os.path.normpath(os.path.join(weblcm_def.FILEDIR_DICT.get(typ), fil))
if os.path.isfile(path):
            os.remove(path)
if not os.path.exists(path):
result['SDCERR'] = weblcm_def.WEBLCM_ERRORS.get('SDCERR_SUCCESS')
result['InfoMsg'] = f'file {fil} deleted'
syslog(f'file {fil} deleted')
else:
syslog(f'Attempt to remove file {path} did not succeed')
else:
syslog(f'Attempt to remove non-existant file {path}')
result['InfoMsg'] = f"File: {fil} not present"
return result
@cherrypy.expose
class FilesManage(object):
@cherrypy.tools.json_out()
def GET(self, *args, **kwargs):
typ = kwargs.get('type', None)
if not typ:
syslog('FilesManage GET - no type provided')
raise cherrypy.HTTPError(400, 'no filename provided')
files = []
with os.scandir(weblcm_def.FILEDIR_DICT.get(typ)) as listOfEntries:
for entry in listOfEntries:
if entry.is_file():
strs = entry.name.split('.')
if len(strs) == 2 and strs[1] in weblcm_def.FILEFMT_DICT.get(typ):
files.append(entry.name)
files.sort()
return files
@cherrypy.expose
class AWMCfgManage(object):
_lock = Lock()
@cherrypy.tools.json_out()
def GET(self, *args, **kwargs):
#Infinite geo-location checks by default
result = {
'SDCERR': weblcm_def.WEBLCM_ERRORS.get('SDCERR_SUCCESS'),
'InfoMsg': 'AWM configuration only supported in LITE mode',
'geolocation_scanning_enable': 1,
}
# check if there is a configuration file which contains a "scan_attempts:0" entry
# if configuration file does not exist, scan_attempts is not disabled
f = cherrypy.request.app.config['weblcm'].get('awm_cfg', None)
if not f:
return result
if not os.path.isfile(f):
return result
config = Config()
with AWMCfgManage._lock:
config.readFile(f)
if config.exists("scan_attempts"):
result['geolocation_scanning_enable'] = config.value("scan_attempts")[0]
result['ErrorMesg'] = ''
return result
@cherrypy.tools.accept(media='application/json')
@cherrypy.tools.json_in()
@cherrypy.tools.json_out()
def PUT(self):
# Enable/disable geolocation scanning
# 0: disable geolocation scanning
# others: enable geolocation scanning
result = {
'SDCERR': weblcm_def.WEBLCM_ERRORS.get('SDCERR_FAIL'),
'InfoMsg': "AWM's geolocation scanning configuration only supported in LITE mode",
'geolocation_scanning_enable': 1,
}
# determine if in LITE mode
litemode = False
try:
file = open("/etc/default/adaptive_ww","r")
for line in file:
if re.search('LITE', line):
litemode = True
break
except Exception as e:
pass
if not litemode:
return result
#prep for next error condition
result['InfoMsg'] = 'No writable configuration file found'
# check if there is a configuration file which contains a "scan_attempts:0" entry
# if writable configuration file does not exist, scan_attempts can not be modified
        fp = cherrypy.request.app.config['weblcm'].get('awm_cfg', None)
if not fp:
return result
d = Path(os.path.dirname(fp))
d.mkdir(exist_ok=True)
geolocation_scanning_enable = cherrypy.request.json.get('geolocation_scanning_enable', 0)
config = Config()
with AWMCfgManage._lock:
if os.path.isfile(fp):
config.readFile(fp)
if geolocation_scanning_enable:
if config.exists("scan_attempts"):
config.remove("", "scan_attempts")
config.writeFile(fp)
else:
if not config.exists("scan_attempts"):
config.addInteger("", "scan_attempts")
config.setValue("scan_attempts", geolocation_scanning_enable)
config.writeFile(fp)
result['geolocation_scanning_enable'] = geolocation_scanning_enable
result['SDCERR'] = weblcm_def.WEBLCM_ERRORS.get('SDCERR_SUCCESS')
result['InfoMsg'] = ''
return result
|
py | b411d5ab944cda058da0e01ed2454c7d84f72a34 | import pytest
from django.urls import resolve, reverse
from gestao_rh.users.models import User
pytestmark = pytest.mark.django_db
def test_user_detail(user: User):
assert (
reverse("api:user-detail", kwargs={"username": user.username})
== f"/api/users/{user.username}/"
)
assert resolve(f"/api/users/{user.username}/").view_name == "api:user-detail"
def test_user_list():
assert reverse("api:user-list") == "/api/users/"
assert resolve("/api/users/").view_name == "api:user-list"
def test_user_me():
assert reverse("api:user-me") == "/api/users/me/"
assert resolve("/api/users/me/").view_name == "api:user-me"
|
py | b411d5e6147a69e98a2cacefc413839bdd50afeb | # *********************************************************************************
# REopt, Copyright (c) 2019-2020, Alliance for Sustainable Energy, LLC.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list
# of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# Neither the name of the copyright holder nor the names of its contributors may be
# used to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
# *********************************************************************************
from django.test import TestCase
from reo.src.load_profile import BuiltInProfile
class LoadsTestCase(TestCase):
# mimic user passing in info
def setUp(self):
pass
def test_ashrae_zones(self):
test_cities = ['Florida City', 'Tucson', 'Oklahoma City', 'Sacremento', 'San Jose',
'Bowling Green', 'Amarillo', 'Portland', 'Boston', 'Salt Lake City', 'Green Bay', 'Casper',
'Grand Rapids', 'Bethel']
test_latitudes = [25.4479, 32.2217, 35.4676, 38.5816, 37.3382,
36.9685, 35.2220, 45.5231, 42.3600825, 40.7608, 44.5133, 42.8501,
47.2372, 60.7922]
test_longitudes = [-80.479, -110.9265, -97.5164, -121.4944, -121.8863,
-86.4808, -101.8313, -122.6765, -71.0588801, -111.8910, -88.0133, -106.3252,
-96.5302, -161.7558]
expected_ashrae_city = ['Miami', 'Phoenix', 'Atlanta', 'LosAngeles', 'SanFrancisco',
'Baltimore', 'Albuquerque', 'Seattle', 'Chicago', 'Boulder', 'Minneapolis', 'Helena',
'Duluth', 'Fairbanks']
for i in range(0, len(test_cities)):
profile = BuiltInProfile(test_latitudes[i], test_longitudes[i], "MidriseApartment", 500000)
self.assertEqual(profile.city, expected_ashrae_city[i], {r"reopt": {"Error": "Incorrect ASHRAE city returned for test city: " + test_cities[i] + " Expected: " + expected_ashrae_city[i] + " Actual: " + profile.city}})
|
py | b411d824b12ae843273c672ea908150337efbce1 | from configparser import ConfigParser
import os
from re import escape
from core import decoder
from core.auto import auto
from core import client
import sys
def get_cfg(path):
cfg_path = "{}/UserData/config".format(path)
file_list = os.listdir(cfg_path)
cfg_list = []
for cfg in file_list:
if ".ini" in cfg:
cfg_list.append(cfg)
return cfg_list
def select_cfg(cfg_list):
print("請選擇設定檔")
i = 1
for cfg in cfg_list:
print("\033[1;34m {0}: {1}\033[0m".format(i, cfg))
i += 1
print("\033[1;31m e: 離開\033[0m")
try:
inputIndex = input(
" 請輸入設定檔編號 [0 ~ {0}]: ".format(i-1))
value = int(inputIndex)
if value >= i:
raise Exception("index is to big.")
return value
except (KeyboardInterrupt, SystemExit):
raise Exception("KeyboardInterrupt")
except Exception as e:
if "e" == inputIndex or "E" == inputIndex:
return -1
else:
print(
"\033[1;31m編號錯誤,請確認後輸入\033[0m")
input("請輸入enter繼續")
return select_cfg(cfg_list)
if __name__ == '__main__':
path = os.path.dirname(os.path.abspath(__file__))
os.system("{0}/adb/adb.exe kill-server".format(path))
os.system("{0}/adb/adb.exe start-server".format(path))
while True:
os.system('cls')
try:
device = client.get_devices(path)
except Exception as e:
print(e.args[0])
break
print("\033[1;33m你選擇的設備是: {}\n\033[0m".format(device))
try:
cfg_name = get_cfg(path)[int(select_cfg(get_cfg(path)))-1]
except Exception as e:
print(e.args[0])
break
print("\033[1;33m你選擇的設定檔是: {}\n\033[0m".format(cfg_name))
ini_path = "{}/UserData/config/{}".format(path, cfg_name)
cfg = ConfigParser()
cfg.read(ini_path)
ver = cfg['version']['version']
support = cfg['support']['support']
apple_count = cfg['ap_recover']['count']
apple = cfg['ap_recover']['apple']
recover_time = cfg['recover_time']['recover_time']
battle1_str = cfg['default_skill']['battle1']
battle2_str = cfg['default_skill']['battle2']
battle3_str = cfg['default_skill']['battle3']
crd1_str = cfg['default_card']['battle1']
crd2_str = cfg['default_card']['battle2']
crd3_str = cfg['default_card']['battle3']
codelist = [battle1_str, battle2_str,
battle3_str, crd1_str, crd2_str, crd3_str]
run_times = input("請輸入次數")
while not run_times.isdigit():
os.system('cls')
run_times = input("請輸入次數")
try:
round = auto("menu.png", support, int(apple_count), apple, device, int(
recover_time) * 60, run_time=int(run_times), ver=ver)
instr = decoder.decode(codelist)
round.quick_start(True)
for runs in range(int(run_times)):
for i in range(0, len(instr)):
exec(instr[i])
except Exception as e:
input("按下Enter結束執行 ")
finally:
ctrl = input("請輸入Enter繼續,或輸入'e'已離開程式")
if ctrl.lower() == "e":
break
else:
os.system("cls")
|
py | b411d84a6b765dbcbc2e8e00572f100c07182afc | #!/usr/bin/python3
import sys
import os
from clint.textui import colored
from codemon.CodemonHelp import showHelp
from codemon.CodemonListen import listen
from codemon.CodemonInit import init, init_single_file
from codemon.CodemonReg import codemonReg
from codemon.CodemonMeta import get_filename, get_practice_files
from codemon.CodemonFetch import fetch_tests
from codemon.CodemonParse import Parser
def main():
arg = Parser()
arg.parse(sys.argv[1:])
if arg.help:
showHelp()
elif arg.to_listen:
listen()
elif arg.to_practice:
contestName = arg.name
practiceFiles = get_practice_files()
init(contestName, practiceFiles, arg.init_flags)
elif arg.to_init:
if arg.init_flags["is_single"]:
fileName = arg.name
init_single_file(f'{fileName}', arg.init_flags)
else:
contestName = arg.name
fileNames = get_filename(contestName)
init(contestName, fileNames, arg.init_flags)
if arg.init_flags["to_fetch"]:
fetch_tests(fileNames, contestName)
elif arg.to_fetch:
contestName = os.path.basename(os.getcwd())
fileNames = get_filename(contestName)
fetch_tests(fileNames, contestName)
elif arg.Reg:
codemonReg()
else:
showHelp()
|
py | b411d8518309ba91888df1ae67705bf1f2a832a9 | try: from datasets.dataset import COCOInputDataset
except ImportError: from Cyclotron.datasets.dataset import COCOInputDataset
try: from datasets.dataset import ImageDataset
except ImportError: from Cyclotron.datasets.dataset import ImageDataset
from detectron2 import model_zoo
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets import register_coco_instances
from detectron2.engine import DefaultTrainer
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
from detectron2.structures import boxes, Instances
from detectron2.utils.visualizer import Visualizer, ColorMode
import torch
import cv2, os, time
import matplotlib.pyplot as plt
class Detector:
def __init__(self, NUM_CLASSES = 1, NUM_WORKERS = 2,
load_model_flag = True, cfg_file_path = None, input_dataset = None,
model_name = None, output_dir = './Outputs/', parameters = None):
assert model_name, "model_name required"
assert parameters['THRESHOLD'], 'model test threshold required in parameters'
self.model_name = model_name
self.parameters = parameters
if load_model_flag:
assert self.model_name, "provide model architecture name of the saved detectron2 model Eg. mask_rcnn_R_50_FPN_1x"
self.predictor = self.load_model(NUM_CLASSES = NUM_CLASSES, cfg_file_path = cfg_file_path)
else:
assert input_dataset, "provide input dataset object for training"
self.predictor = self.train_model(NUM_CLASSES = NUM_CLASSES, NUM_WORKERS = NUM_WORKERS,
inp_dataset = input_dataset, output_dir = output_dir)
#trains a detectron2 Mask R-CNN model
def train_model(self, NUM_CLASSES, NUM_WORKERS, inp_dataset, output_dir):
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file('COCO-InstanceSegmentation/' + self.model_name + '.yaml'))
print(self.model_name)
cfg.DATASETS.TRAIN = (inp_dataset.dataset_name,)
cfg.DATASETS.TEST = ()
cfg.DATALOADER.NUM_WORKERS = NUM_WORKERS
cfg.MODEL.ROI_HEADS.NUM_CLASSES = NUM_CLASSES
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url('COCO-InstanceSegmentation/' + self.model_name + '.yaml')
if self.parameters:
if 'device' in self.parameters.keys():
cfg.MODEL.DEVICE = self.parameters['device']
else:
if not torch.cuda.is_available(): cfg.MODEL.DEVICE = 'cpu'
if 'IMS_PER_BATCH' in self.parameters.keys():
cfg.SOLVER.IMS_PER_BATCH = self.parameters['IMS_PER_BATCH']
print('images per batch', self.parameters['IMS_PER_BATCH'])
else:
cfg.SOLVER.IMS_PER_BATCH = 13
if 'BASE_LR' in self.parameters.keys():
cfg.SOLVER.BASE_LR = self.parameters['BASE_LR']
print('base lr', self.parameters['BASE_LR'])
if 'MAX_ITER' in self.parameters.keys():
cfg.SOLVER.MAX_ITER = self.parameters['MAX_ITER']
print('max iter', self.parameters['MAX_ITER'])
if 'BATCH_SIZE_PER_IMAGE' in self.parameters.keys():
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = self.parameters['BATCH_SIZE_PER_IMAGE']
print('batch size per image', self.parameters['BATCH_SIZE_PER_IMAGE'])
else:
if not torch.cuda.is_available(): cfg.MODEL.DEVICE = 'cpu'
cfg.SOLVER.IMS_PER_BATCH = 13
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url('COCO-InstanceSegmentation/' + self.model_name + '.yaml')
cfg.OUTPUT_DIR = output_dir
os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
trainer = DefaultTrainer(cfg)
trainer.resume_or_load(resume = False)
trainer.train()
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = self.parameters['THRESHOLD']
predictor = DefaultPredictor(cfg)
cfg.dump(stream=open(cfg.OUTPUT_DIR + '/model_final_cfg.yaml', 'w'))
return predictor
#loads saved detectron2 model
def load_model(self, NUM_CLASSES, cfg_file_path):
assert cfg_file_path, 'provide cfg_file_path'
cfg = get_cfg()
if self.parameters and 'device' in self.parameters.keys():
cfg.MODEL.DEVICE = self.parameters['device']
else:
if not torch.cuda.is_available(): cfg.MODEL.DEVICE = 'cpu'
cfg.merge_from_file(cfg_file_path)
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = self.parameters['THRESHOLD']
cfg.MODEL.ROI_HEADS.NUM_CLASSES = NUM_CLASSES
predictor = DefaultPredictor(cfg)
return predictor
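    # Usage sketch (illustrative paths, model name and values; not part of the original class):
    #   detector = Detector(NUM_CLASSES=1, load_model_flag=True,
    #                       cfg_file_path='./Outputs/model_final_cfg.yaml',
    #                       model_name='mask_rcnn_R_50_FPN_1x',
    #                       parameters={'THRESHOLD': 0.5, 'device': 'cpu'})
    #   outputs = detector.get_predictions_of_all_images(inp_dataset=my_dataset)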
def visualize_predictions(self, image, outputs, metadata = None, instance_mode = ColorMode.IMAGE):
v = Visualizer(image[:, :, ::-1],
metadata = metadata,
scale=0.75,
instance_mode = instance_mode)
out = v.draw_instance_predictions(outputs)
plt.tick_params(left = False, right = False,
labelleft = False, labelbottom = False)
plt.imshow(out.get_image()[:, :, ::-1])
plt.show()
return out.get_image()[:, :, ::-1]
#saves visualised prediction images
def save_prediction_images(self, inp_dataset = None, save_path = None, outputs = None, sem_seg_color = None, is_seg_color = False):
assert save_path, 'Provide save_path to save the images'
assert inp_dataset, 'Provide inp_dataset'
assert outputs, 'Provide outputs'
for i, output in outputs.items():
image = cv2.imread(inp_dataset.dataset_images_path + '/' + inp_dataset.get_image_name(i))
            if is_seg_color:
                metadata = inp_dataset.dataset_metadata
                # draw instance masks using the requested segmentation colour
                metadata.set(thing_colors = [sem_seg_color])
                image_result = self.visualize_predictions(image = image,
                                                          outputs = output['instances'].to('cpu'),
                                                          metadata = metadata,
                                                          instance_mode = ColorMode.SEGMENTATION)
            else:
                image_result = self.visualize_predictions(image, output['instances'].to('cpu'))
cv2.imwrite(save_path + '/' + inp_dataset.get_image_name(i), image_result)
def get_predictions_of_all_images(self, inp_dataset = None, visualize = False):
assert inp_dataset, 'Provide inp_dataset'
start = time.time()
image_id_list = inp_dataset.image_id_list
outputs_in_Instance_class = {}
count = 1
for image_name, image_id in image_id_list.items():
d = {}
if (count % 50 == 0) or (count == len(image_id_list)) or (count == 1):
string_a = str(count) + ' image ' if count == 1 else str(count) + ' images '
print('Predictions for ' + string_a + 'made')
image = cv2.imread(inp_dataset.dataset_images_path + '/' + image_name)
o = self.predictor(image)
if not o:
pred_box = []
req_mask = []
req_class = []
req_score = []
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
pred_bboxes = boxes.Boxes(torch.Tensor(pred_box).to(device))
scores = torch.Tensor(req_score).to(device)
pred_class = torch.Tensor(req_class).to(device).type(torch.int64)
pred_masks = torch.Tensor(req_mask).to(device).type(torch.uint8)
                obj = Instances(image_size = image.shape[:2])  # (height, width) of the input image
obj.set('pred_classes', pred_class)
obj.set('scores', scores)
obj.set('pred_masks', pred_masks)
obj.set('pred_boxes', pred_bboxes)
o = {'instances': obj}
d = o['instances'].to('cpu')
outputs_in_Instance_class[image_id] = {'instances': d}
if visualize: image_result = self.visualize_predictions(image, o)
count += 1
        print(f'Got all predictions in {time.time() - start:.2f} seconds')
return outputs_in_Instance_class |
py | b411d905ea2e848a8aeb7e244d5a07ddb93ba01f | class Solution:
def maxProfitAssignment(self, difficulty, profit, worker):
"""
:type difficulty: List[int]
:type profit: List[int]
:type worker: List[int]
:rtype: int
"""
jobs = list(zip(difficulty, profit))
jobs.sort()
total = i = maxProfit = 0
for w in sorted(worker):
while i < len(jobs) and jobs[i][0] <= w:
maxProfit = max(maxProfit, jobs[i][1])
i += 1
total += maxProfit
return total
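    # Example (illustrative): maxProfitAssignment([2,4,6,8,10], [10,20,30,40,50], [4,5,6,7])
    # -> 20 + 20 + 30 + 30 = 100, since each worker takes the best-paying job they can handle.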
|
py | b411d957a54dbd164615caac1fa3c1a561635a34 | from game_objects.game_object import GameObject
from program_variables import GLOBAL_WIDTH, GLOBAL_HEIGHT
import random
SCREEN_WIDTH = GLOBAL_WIDTH
SCREEN_HEIGHT = GLOBAL_HEIGHT
class Asteroid(GameObject):
def __init__(self, x: int, y: int, id):
super().__init__(x, y)
self.vx = random.randint(1, 5)
self.vy = random.randint(1, 5)
self.shift_x = 0
self.shift_y = 0
self.delay = 10
self.id = id
def on_tick(self):
if self.delay > 0:
new_x = self.x + self.shift_x
new_y = self.y + self.shift_y
self.x = new_x % SCREEN_WIDTH
self.y = new_y % SCREEN_HEIGHT
self.delay -= 1
else:
self.shift_x = random.randint(-5, 5)
self.shift_y = random.randint(-5, 5)
self.delay = 10
|
py | b411d9be81170f6a3d2b567eab632c199d92696a | import os
from datetime import datetime
import requests
import logging
URL = 'https://cointelegraph.com'
LOGFILE = 'cointelegraph.log'
def get_logger():
logger = logging.getLogger('ct_logger')
logger.setLevel('DEBUG')
console_handler = logging.StreamHandler()
console_formatter = logging.Formatter('%(levelname)-8s: %(message)s')
console_handler.setFormatter(console_formatter)
file_handler = logging.FileHandler(LOGFILE, mode='a')
file_formatter = logging.Formatter(
'%(asctime)s %(name)s %(levelname)-8s: %(message)s',
"%Y-%m-%d %H:%M:%S")
file_handler.setFormatter(file_formatter)
logger.addHandler(console_handler)
logger.addHandler(file_handler)
return logger
def string_to_date(date_str):
return datetime.strptime(date_str, '%Y-%m-%d').date()
def validate_csv_filepath_arg(filepath):
if filepath is None or not os.path.exists(filepath):
logger.critical('Invalid CSV file')
return False
return True
def validate_date_arg(date_str):
try:
if date_str is None:
logger.critical('No date found')
return False
date = string_to_date(date_str)
if date > datetime.now().date():
logger.critical(
'Entered date must not be greater than today\'s date')
return False
return True
except Exception:
logger.critical('Invalid date')
return False
def get_html(driver):
html = driver.page_source
return html
def write_html(filename, content):
with open(filename, 'w+') as hf:
hf.write(content)
hf.close()
def get_content(url):
HEADERS = {
        'User-Agent': ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; '
                       'rv:73.0) Gecko/20100101 Firefox/73.0'),
}
return requests.get(url, headers=HEADERS).text
def is_url_valid(url):
return url.find('/news') > 0
logger = get_logger()
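

# Minimal usage sketch (illustrative only; the date literal and article slug
# below are made-up examples, not part of the original module). Guarded so
# that importing this module elsewhere is unaffected.
if __name__ == '__main__':
    logger.debug('date ok: %s', validate_date_arg('2020-01-01'))
    logger.debug('url ok: %s', is_url_valid(URL + '/news/some-article'))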
|
py | b411dac20b0f1e4058092781f140a559821b82aa | import pytest
from django.conf import settings
from django.test import RequestFactory
from jnc.users.tests.factories import UserFactory
@pytest.fixture(autouse=True)
def media_storage(settings, tmpdir):
settings.MEDIA_ROOT = tmpdir.strpath
@pytest.fixture
def user() -> settings.AUTH_USER_MODEL:
return UserFactory()
@pytest.fixture
def request_factory() -> RequestFactory:
return RequestFactory()
|
py | b411dc071debdf4979e4b28537cdca52546b1977 | #!/usr/bin/env python3
import sys
import argparse
from prettytable import PrettyTable
from table_method import table_random
from program_method import program_random
import criterion
def create_parser():
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--count', required=True, type=int)
return parser
def main():
parser = create_parser()
args = parser.parse_args(sys.argv[1:])
table_random_one = list()
table_random_two = list()
table_random_thr = list()
progr_random_one = list()
progr_random_two = list()
progr_random_thr = list()
table = PrettyTable()
table.field_names = [
"\033[1mN\033[0m",
"\033[1mТабл. 1 цифра\033[0m",
"\033[1mТабл. 2 цифры\033[0m",
"\033[1mТабл. 3 цифры\033[0m",
"\033[1mПрогр. 1 цифра\033[0m",
"\033[1mПрогр. 2 цифры\033[0m",
"\033[1mПрогр. 3 цифры\033[0m",
]
for i in range(args.count):
table_random_one.append(table_random(1))
table_random_two.append(table_random(2))
table_random_thr.append(table_random(3))
progr_random_one.append(program_random(1))
progr_random_two.append(program_random(2))
progr_random_thr.append(program_random(3))
table.add_row([
i + 1,
table_random_one[-1],
table_random_two[-1],
table_random_thr[-1],
progr_random_one[-1],
progr_random_two[-1],
progr_random_thr[-1],
])
print(table)
table_frequency = PrettyTable()
table_frequency.field_names = [
"\033[1mЧастотный критерий\033[0m",
"\033[1mТабл. 1 цифра\033[0m",
"\033[1mТабл. 2 цифры\033[0m",
"\033[1mТабл. 3 цифры\033[0m",
"\033[1mПрогр. 1 цифра\033[0m",
"\033[1mПрогр. 2 цифры\033[0m",
"\033[1mПрогр. 3 цифры\033[0m",
]
table_frequency.add_row([
"Полученный результат",
"{:.4f}".format(criterion.frequency(table_random_one, 0, 9)),
"{:.4f}".format(criterion.frequency(table_random_two, 10, 99)),
"{:.4f}".format(criterion.frequency(table_random_thr, 100, 999)),
"{:.4f}".format(criterion.frequency(progr_random_one, 0, 9)),
"{:.4f}".format(criterion.frequency(progr_random_two, 10, 99)),
"{:.4f}".format(criterion.frequency(progr_random_thr, 100, 999)),
])
table_frequency.add_row([
"Ожидаемый результат",
"{:.4f}".format(criterion.ideal_frequency(0, 9)),
"{:.4f}".format(criterion.ideal_frequency(10, 99)),
"{:.4f}".format(criterion.ideal_frequency(100, 999)),
"{:.4f}".format(criterion.ideal_frequency(0, 9)),
"{:.4f}".format(criterion.ideal_frequency(10, 99)),
"{:.4f}".format(criterion.ideal_frequency(100, 999)),
])
print(table_frequency)
table_xi = PrettyTable()
table_xi.field_names = [
"\033[1mКритерий хи квадрат\033[0m",
"\033[1mТабл. 1 цифра\033[0m",
"\033[1mТабл. 2 цифры\033[0m",
"\033[1mТабл. 3 цифры\033[0m",
"\033[1mПрогр. 1 цифра\033[0m",
"\033[1mПрогр. 2 цифры\033[0m",
"\033[1mПрогр. 3 цифры\033[0m",
]
chi_table_one = criterion.calc_hi(table_random_one)
chi_table_two = criterion.calc_hi(table_random_two)
chi_table_thr = criterion.calc_hi(table_random_thr)
chi_progr_one = criterion.calc_hi(progr_random_one)
chi_progr_two = criterion.calc_hi(progr_random_two)
chi_progr_thr = criterion.calc_hi(progr_random_thr)
table_xi.add_row([
"chi^2",
"{:.4f}".format(chi_table_one[0]),
"{:.4f}".format(chi_table_two[0]),
"{:.4f}".format(chi_table_thr[0]),
"{:.4f}".format(chi_progr_one[0]),
"{:.4f}".format(chi_progr_two[0]),
"{:.4f}".format(chi_progr_thr[0]),
])
table_xi.add_row([
"p",
"{:.4f}".format(chi_table_one[1] * 100),
"{:.4f}".format(chi_table_two[1] * 100),
"{:.4f}".format(chi_table_thr[1] * 100),
"{:.4f}".format(chi_progr_one[1] * 100),
"{:.4f}".format(chi_progr_two[1] * 100),
"{:.4f}".format(chi_progr_thr[1] * 100),
])
print(table_xi)
if __name__ == "__main__":
main()
|
py | b411dcaac10fdc4acbf00531e6e4b6f984818bf9 | """Database models for dashboard app."""
from django.db import models
class ProjectSummary(models.Model):
"""Summary data about a JIRA filter for a particular date."""
filter_id = models.IntegerField()
incomplete = models.IntegerField()
complete = models.IntegerField()
total = models.IntegerField()
created_on = models.DateField()
updated_at = models.DateTimeField(null=False)
class Meta:
verbose_name = "project summary"
verbose_name_plural = "project summaries"
unique_together = (("filter_id", "created_on"))
get_latest_by = "updated_at"
def __repr__(self):
return "<ProjectSummary {} filter: {} created_on: {}>".format(self.id, self.filter_id, self.created_on)
@property
def pct_complete(self):
"""How complete is the project.
Returns:
float: Percentage of the project that is complete.
"""
if self.total == 0:
return float(0)
return self.complete / float(self.total)
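

# Illustrative usage (comment-only sketch: constructing model instances
# requires a configured Django project, and the values below are made up):
#
#     summary = ProjectSummary(filter_id=1, incomplete=3, complete=7,
#                              total=10, created_on=datetime.date.today())
#     summary.pct_complete   # -> 0.7; returns 0.0 when total == 0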
|
py | b411dcb7bc34c0e05a82e69b2adf50c46dd9a0f0 | #------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
#------------------------------------------------------------------------------
from atom.api import Bool, Typed, ForwardTyped
from enaml.application import Application
from enaml.colors import ColorMember, Color
from enaml.core.declarative import d_, observe
from .toolkit_dialog import ToolkitDialog, ProxyToolkitDialog
class ProxyColorDialog(ProxyToolkitDialog):
""" The abstract definition of a proxy ColorDialog object.
"""
#: A reference to the ColorDialog declaration.
declaration = ForwardTyped(lambda: ColorDialog)
@staticmethod
def custom_count():
raise NotImplementedError
@staticmethod
def custom_color(index):
raise NotImplementedError
@staticmethod
def set_custom_color(index, color):
raise NotImplementedError
def set_current_color(self, color):
raise NotImplementedError
def set_show_alpha(self, show):
raise NotImplementedError
def set_show_buttons(self, show):
raise NotImplementedError
class ColorDialog(ToolkitDialog):
""" A toolkit dialog that allows the user to select a color.
"""
#: The currently selected color of the dialog.
current_color = d_(ColorMember('white'))
#: Whether or not to show the alpha value control.
show_alpha = d_(Bool(True))
#: Whether or not to show the dialog ok/cancel buttons.
show_buttons = d_(Bool(True))
#: The color selected when the user clicks accepts the dialog.
#: This value is output only.
selected_color = ColorMember()
#: A reference to the ProxyColorDialog object.
proxy = Typed(ProxyColorDialog)
@staticmethod
def get_color(parent=None, **kwargs):
""" A static method which launches a color dialog.
Parameters
----------
parent : ToolkitObject or None
The parent toolkit object for this dialog.
**kwargs
Additional data to pass to the dialog constructor.
Returns
-------
result : Color or None
The selected color or None if no color was selected.
"""
dialog = ColorDialog(parent, **kwargs)
if dialog.exec_():
return dialog.selected_color
@staticmethod
def custom_count():
""" Get the number of available custom colors.
The custom colors are shared among all color dialogs.
Returns
-------
result : int
The number of available custom colors.
Notes
-----
The Application object must exist before calling this method.
"""
app = Application.instance()
assert app is not None, 'the application object does not exist'
proxy_cls = app.resolve_proxy_class(ColorDialog)
if proxy_cls is not None:
return proxy_cls.custom_count()
return 0
@staticmethod
def custom_color(index):
""" Get the custom color for the given index.
The custom colors are shared among all color dialogs.
Parameters
----------
index : int
The integer index of the custom color.
Returns
-------
result : Color
The custom color for the index.
Notes
-----
The Application object must exist before calling this method.
"""
app = Application.instance()
assert app is not None, 'the application object does not exist'
proxy_cls = app.resolve_proxy_class(ColorDialog)
if proxy_cls is not None:
return proxy_cls.custom_color(index)
return Color(255, 255, 255)
@staticmethod
def set_custom_color(index, color):
""" Set the custom color for the given index.
The custom colors are shared among all color dialogs.
Parameters
----------
index : int
The integer index of the custom color.
color : Color
The custom color to set for the index
Notes
-----
The Application object must exist before calling this method.
"""
app = Application.instance()
assert app is not None, 'the application object does not exist'
proxy_cls = app.resolve_proxy_class(ColorDialog)
if proxy_cls is not None:
proxy_cls.set_custom_color(index, color)
#--------------------------------------------------------------------------
# Observers
#--------------------------------------------------------------------------
@observe('current_color', 'show_alpha', 'show_buttons')
def _update_proxy(self, change):
""" An observer which updates the proxy when the data changes.
"""
# The superclass implementation is sufficient.
super(ColorDialog, self)._update_proxy(change)
#--------------------------------------------------------------------------
# Utility Methods
#--------------------------------------------------------------------------
def _prepare(self):
""" A reimplemented preparation method.
This method resets the selected color to None.
"""
super(ColorDialog, self)._prepare()
self.selected_color = None
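

# Usage sketch for the static helper above (comment-only: launching the
# dialog requires a running Enaml application with a toolkit proxy, and
# `parent_widget` is a placeholder name, not part of this module):
#
#     color = ColorDialog.get_color(parent=parent_widget,
#                                   current_color='red', show_alpha=False)
#     if color is not None:
#         do_something_with(color)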
|
py | b411dcd9808a05d2590aafb36926a4affb82609e | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 19 23:13:24 2019
@author: Abhishek Mukherjee
"""
marksheet=[];
for _ in range(int(input())):
name = input()
score = float(input())
marksheet.append([name, score])
sorted_marksheet=sorted(marksheet, key = lambda x: x[1])
secondLast=sorted_marksheet[1]
ThirdLast=sorted_marksheet[2]
scoreSecondLast=secondLast[1]
scoreThirdLast=ThirdLast[1]
if scoreSecondLast==scoreThirdLast:
sorted_names=sorted([secondLast[0],ThirdLast[0]])
print(sorted_names[0])
print(sorted_names[1])
else:
print(secondLast[0])
marksheet=[]
dupNames=[]
for _ in range(int(input())):
name = input()
score = float(input())
marksheet.append([name, score])
sorted_marksheet=sorted(marksheet, key = lambda x: x[1])
if sorted_marksheet[0][1]==sorted_marksheet[1][1]==sorted_marksheet[2][1]:
secondLast=sorted_marksheet[3]
if sorted_marksheet[0][1]==sorted_marksheet[1][1]:
secondLast=sorted_marksheet[2]
else:
secondLast=sorted_marksheet[1]
scoreSecondLast=secondLast[1]
for i in range(len(marksheet)):
if scoreSecondLast==marksheet[i][1]:
dupNames.append(marksheet[i][0])
sorted_names=sorted(dupNames)
print('\n'.join(map(str, sorted_names))) |
py | b411def7d8129220536877dc7e7e8e2c57368cdc | import pandas as pd
import altair as alt
def numeric_plots(df):
"""
Creating a matrix of correlation plots with the numeric features.
Parameters
----------
df: pandas.dataframe
A pandas dataframe
Returns
-------
splom: Altair chart object
The Altair object for the plots
Example
-------
>>> from EDAhelper.numeric_plots import numeric_plots
    >>> splom = numeric_plots(df)
"""
# Data validation
if not isinstance(df, pd.DataFrame):
raise TypeError("'df' should be of type 'pandas.DataFrame'.")
numeric_cols = df.select_dtypes(include=['float64', 'int64'])
splom = alt.Chart(numeric_cols).mark_point(opacity=0.3, size=10).encode(
alt.X(
alt.repeat('row'),
type='quantitative',
scale=alt.Scale(zero=False)
),
alt.Y(
alt.repeat('column'),
type='quantitative',
scale=alt.Scale(zero=False)
)
).properties(
width=120,
height=120
).repeat(
column=list(numeric_cols.columns),
row=list(numeric_cols.columns)
).configure_axis(
labelFontSize=8,
titleFontSize=8
)
return splom
print(numeric_plots.__doc__)
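

# Quick self-check (illustrative; the toy frame and output filename are
# assumptions, not part of the EDAhelper package). Non-numeric columns such
# as 'label' are dropped by select_dtypes before plotting.
if __name__ == "__main__":
    toy = pd.DataFrame({
        "x": [1, 2, 3, 4],
        "y": [2.0, 4.1, 5.9, 8.2],
        "label": ["a", "b", "c", "d"],
    })
    numeric_plots(toy).save("numeric_splom.html")  # hypothetical output path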
|
py | b411df4ec3a39872b891a1c3a0a6e7f257f58e40 | """
Partial Correlation in Python (clone of Matlab's partialcorr)
This uses the linear regression approach to compute the partial
correlation (might be slow for a huge number of variables). The
algorithm is detailed here:
http://en.wikipedia.org/wiki/Partial_correlation#Using_linear_regression
Taking X and Y two variables of interest and Z the matrix with all the variable minus {X, Y},
the algorithm can be summarized as
1) perform a normal linear least-squares regression with X as the target and Z as the predictor
2) calculate the residuals in Step #1
3) perform a normal linear least-squares regression with Y as the target and Z as the predictor
4) calculate the residuals in Step #3
5) calculate the correlation coefficient between the residuals from Steps #2 and #4;
The result is the partial correlation between X and Y while controlling for the effect of Z
Date: Nov 2014
Author: Fabian Pedregosa-Izquierdo, [email protected]
Testing: Valentina Borghesani, [email protected]
"""
import numpy as np
from scipy import stats, linalg
def partial_corr(C):
"""
Returns the sample linear partial correlation coefficients between pairs of variables in C, controlling
for the remaining variables in C.
Parameters
----------
C : array-like, shape (n, p)
Array with the different variables. Each column of C is taken as a variable
Returns
-------
P : array-like, shape (p, p)
P[i, j] contains the partial correlation of C[:, i] and C[:, j] controlling
for the remaining variables in C.
"""
C = np.asarray(C)
p = C.shape[1]
    P_corr = np.zeros((p, p), dtype=float)
for i in range(p):
P_corr[i, i] = 1
for j in range(i+1, p):
            idx = np.ones(p, dtype=bool)
idx[i] = False
idx[j] = False
beta_i = linalg.lstsq(C[:, idx], C[:, j])[0]
beta_j = linalg.lstsq(C[:, idx], C[:, i])[0]
res_j = C[:, j] - C[:, idx].dot( beta_i)
res_i = C[:, i] - C[:, idx].dot(beta_j)
corr = stats.pearsonr(res_i, res_j)[0]
P_corr[i, j] = corr
P_corr[j, i] = corr
return P_corr |
py | b411dff3df455831b232c906ce9ef0aa41cfe3ec | """
This file stores all the possible configurations for the Flask app.
Changing configurations like the secret key or the database
url should be stored as environment variables and imported
using the 'os' library in Python.
"""
import os
class BaseConfig:
SQLALCHEMY_TRACK_MODIFICATIONS = False
SERVER = os.getenv('ORACLE_SERVER', 'jqadb7.qa.j.intershop.de')
PORT = os.getenv('ORACLE_PORT', 1521)
SERVICE = os.getenv('ORACLE_SERVICE', 'ORCL12')
USERNAME = os.getenv('ORACLE_USERNAME', 'system')
PASSWORD = os.getenv('ORACLE_PASSWORD', 'intershop')
COLLECT_METRICS_INTERVAL_SEC = int(
os.getenv('COLLECT_METRICS_INTERVAL_SEC', 120))
DEBUG = False
TESTING = False
class TestingConfig(BaseConfig):
DEBUG = True
TESTING = True
SERVER = os.getenv('ORACLE_SERVER', 'my_host')
PORT = os.getenv('ORACLE_PORT', 1521)
class DevelopmentConfig(BaseConfig):
DEBUG = True
class ProductionConfig(BaseConfig):
DEBUG = False
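

# Example of how an app factory might pick one of these classes (comment-only
# sketch; the FLASK_CONFIG variable name and the factory itself are
# assumptions, not part of this module):
#
#     config_map = {'development': DevelopmentConfig,
#                   'testing': TestingConfig,
#                   'production': ProductionConfig}
#     app.config.from_object(config_map[os.getenv('FLASK_CONFIG', 'development')])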
|
py | b411e007f5b2b620c20bb6bf22de8121f4259ccf | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Train a Fast R-CNN network on a region of interest database."""
import _init_paths
from fast_rcnn.train import get_training_roidb, train_net
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from datasets.factory import get_imdb
import caffe
import argparse
import pprint
import numpy as np
import sys
import time
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
parser.add_argument('--gpu', dest='gpu_id',
help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--solver', dest='solver',
help='solver prototxt',
type=str)
parser.add_argument('--iters', dest='max_iters',
help='number of iterations to train',
default=40000, type=int)
parser.add_argument('--weights', dest='pretrained_model',
help='initialize with pretrained model weights',
type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to train on',
default='sensiac_train', type=str)
parser.add_argument('--rand', dest='randomize',
help='randomize (do not use a fixed seed)',
action='store_true')
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
print('Using config:')
pprint.pprint(cfg)
if not args.randomize:
# fix the random seeds (numpy and caffe) for reproducibility
np.random.seed(cfg.RNG_SEED)
caffe.set_random_seed(cfg.RNG_SEED)
# set up caffe
caffe.set_mode_gpu()
if args.gpu_id is not None:
caffe.set_device(args.gpu_id)
imdb = get_imdb(args.imdb_name)
    print('Loaded dataset `{:s}` for training'.format(imdb.name))
roidb = get_training_roidb(imdb)
output_dir = get_output_dir(imdb, None)
    print('Output will be saved to `{:s}`'.format(output_dir))
start = time.clock()
    print(args.solver)
    print("Max iterations:")
    print(args.max_iters)
train_net(args.solver, roidb, output_dir,
pretrained_model=args.pretrained_model,
max_iters=args.max_iters)
end = time.clock()
    print(end - start)
|
py | b411e02bffb471572851789289cfcf047244211b | # coding: utf-8
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. # noqa: E501
The version of the OpenAPI document: 1.0.9-1295
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from intersight.configuration import Configuration
class IaasLicenseKeysInfoAllOf(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'count': 'int',
'expiration_date': 'str',
'license_id': 'str',
'pid': 'str'
}
attribute_map = {
'count': 'Count',
'expiration_date': 'ExpirationDate',
'license_id': 'LicenseId',
'pid': 'Pid'
}
def __init__(self,
count=None,
expiration_date=None,
license_id=None,
pid=None,
local_vars_configuration=None): # noqa: E501
"""IaasLicenseKeysInfoAllOf - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._count = None
self._expiration_date = None
self._license_id = None
self._pid = None
self.discriminator = None
if count is not None:
self.count = count
if expiration_date is not None:
self.expiration_date = expiration_date
if license_id is not None:
self.license_id = license_id
if pid is not None:
self.pid = pid
@property
def count(self):
"""Gets the count of this IaasLicenseKeysInfoAllOf. # noqa: E501
Number of licenses available for the UCSD PID (Product ID). # noqa: E501
:return: The count of this IaasLicenseKeysInfoAllOf. # noqa: E501
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""Sets the count of this IaasLicenseKeysInfoAllOf.
Number of licenses available for the UCSD PID (Product ID). # noqa: E501
:param count: The count of this IaasLicenseKeysInfoAllOf. # noqa: E501
:type: int
"""
self._count = count
@property
def expiration_date(self):
"""Gets the expiration_date of this IaasLicenseKeysInfoAllOf. # noqa: E501
Expiration date for the license. # noqa: E501
:return: The expiration_date of this IaasLicenseKeysInfoAllOf. # noqa: E501
:rtype: str
"""
return self._expiration_date
@expiration_date.setter
def expiration_date(self, expiration_date):
"""Sets the expiration_date of this IaasLicenseKeysInfoAllOf.
Expiration date for the license. # noqa: E501
:param expiration_date: The expiration_date of this IaasLicenseKeysInfoAllOf. # noqa: E501
:type: str
"""
self._expiration_date = expiration_date
@property
def license_id(self):
"""Gets the license_id of this IaasLicenseKeysInfoAllOf. # noqa: E501
UCS Director Unique license ID. # noqa: E501
:return: The license_id of this IaasLicenseKeysInfoAllOf. # noqa: E501
:rtype: str
"""
return self._license_id
@license_id.setter
def license_id(self, license_id):
"""Sets the license_id of this IaasLicenseKeysInfoAllOf.
UCS Director Unique license ID. # noqa: E501
:param license_id: The license_id of this IaasLicenseKeysInfoAllOf. # noqa: E501
:type: str
"""
self._license_id = license_id
@property
def pid(self):
"""Gets the pid of this IaasLicenseKeysInfoAllOf. # noqa: E501
PID (Product ID) for UCSD License. # noqa: E501
:return: The pid of this IaasLicenseKeysInfoAllOf. # noqa: E501
:rtype: str
"""
return self._pid
@pid.setter
def pid(self, pid):
"""Sets the pid of this IaasLicenseKeysInfoAllOf.
PID (Product ID) for UCSD License. # noqa: E501
:param pid: The pid of this IaasLicenseKeysInfoAllOf. # noqa: E501
:type: str
"""
self._pid = pid
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict()
if hasattr(x, "to_dict") else x, value))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, IaasLicenseKeysInfoAllOf):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, IaasLicenseKeysInfoAllOf):
return True
return self.to_dict() != other.to_dict()
|
py | b411e15d224472bd5af95885c94455df54c9b5ad | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
class Migration(migrations.Migration):
dependencies = [
('goodsManage', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='GoodRepair',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
('date', models.DateField(default=datetime.date.today, verbose_name='維修日期')),
('quantity', models.PositiveIntegerField(verbose_name='數量')),
('remark', models.TextField(blank=True, verbose_name='備註')),
('who', models.TextField(verbose_name='誰')),
('good', models.ForeignKey(to='goodsManage.Good')),
('person', models.ForeignKey(verbose_name='維修人', to='goodsManage.Person')),
],
options={
'ordering': ['-date'],
},
),
]
|
py | b411e1c40c473f6c3a0682dd821c1044378b5292 | from dataclasses import dataclass
from typing import List, Tuple, Optional
from blspy import G1Element, G2Element
from Dort.types.blockchain_format.proof_of_space import ProofOfSpace
from Dort.types.blockchain_format.sized_bytes import bytes32
from Dort.util.ints import uint8, uint64
from Dort.util.streamable import Streamable, streamable
"""
Protocol between harvester and farmer.
Note: When changing this file, also change protocol_message_types.py, and the protocol version in shared_protocol.py
"""
@dataclass(frozen=True)
@streamable
class PoolDifficulty(Streamable):
difficulty: uint64
sub_slot_iters: uint64
pool_contract_puzzle_hash: bytes32
@dataclass(frozen=True)
@streamable
class HarvesterHandshake(Streamable):
farmer_public_keys: List[G1Element]
pool_public_keys: List[G1Element]
@dataclass(frozen=True)
@streamable
class NewSignagePointHarvester(Streamable):
challenge_hash: bytes32
difficulty: uint64
sub_slot_iters: uint64
signage_point_index: uint8
sp_hash: bytes32
pool_difficulties: List[PoolDifficulty]
@dataclass(frozen=True)
@streamable
class NewProofOfSpace(Streamable):
challenge_hash: bytes32
sp_hash: bytes32
plot_identifier: str
proof: ProofOfSpace
signage_point_index: uint8
@dataclass(frozen=True)
@streamable
class RequestSignatures(Streamable):
plot_identifier: str
challenge_hash: bytes32
sp_hash: bytes32
messages: List[bytes32]
@dataclass(frozen=True)
@streamable
class RespondSignatures(Streamable):
plot_identifier: str
challenge_hash: bytes32
sp_hash: bytes32
local_pk: G1Element
farmer_pk: G1Element
message_signatures: List[Tuple[bytes32, G2Element]]
@dataclass(frozen=True)
@streamable
class Plot(Streamable):
filename: str
size: uint8
plot_id: bytes32
pool_public_key: Optional[G1Element]
pool_contract_puzzle_hash: Optional[bytes32]
plot_public_key: G1Element
file_size: uint64
time_modified: uint64
@dataclass(frozen=True)
@streamable
class RequestPlots(Streamable):
pass
@dataclass(frozen=True)
@streamable
class RespondPlots(Streamable):
plots: List[Plot]
failed_to_open_filenames: List[str]
no_key_filenames: List[str]
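

# Construction sketch (illustrative values; assumes the Dort uint/bytes
# helpers behave like their upstream chia counterparts, i.e. uint64() wraps
# an int and bytes32() accepts exactly 32 bytes):
#
#     pool_diff = PoolDifficulty(
#         difficulty=uint64(1),
#         sub_slot_iters=uint64(37_600_000_000),
#         pool_contract_puzzle_hash=bytes32(b"\x00" * 32),
#     )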
|
py | b411e1ef73b93d4573743932a8d56e8af696f862 | from controllers import *
from service import TwizoService
from worker import HttpClient
class Twizo:
def __init__(self, api_key, api_host):
worker = HttpClient(api_key, api_host)
twizo_service = TwizoService()
self.widget_register_session_controller = WidgetRegisterSessionController(worker, twizo_service)
self.widget_session_controller = WidgetSessionController(worker, twizo_service)
self.number_lookup_controller = NumberLookupController(worker, twizo_service)
self.verification_controller = VerificationController(worker, twizo_service)
self.application_controller = ApplicationController(worker, twizo_service)
self.backup_code_controller = BackupCodeController(worker, twizo_service)
self.bio_voice_controller = BioVoiceController(worker, twizo_service)
self.balance_controller = BalanceController(worker, twizo_service)
self.totp_controller = TotpController(worker, twizo_service)
self.sms_controller = SmsController(worker, twizo_service)
|
py | b411e230d411e75a0fc4aa1aae80ce488c4f2c74 | #=============================================================================
# Copyright 2017 FLIR Integrated Imaging Solutions, Inc. All Rights Reserved.
#
# This software is the confidential and proprietary information of FLIR
# Integrated Imaging Solutions, Inc. ('Confidential Information'). You
# shall not disclose such Confidential Information and shall use it only in
# accordance with the terms of the license agreement you entered into
# with FLIR Integrated Imaging Solutions, Inc. (FLIR).
#
# FLIR MAKES NO REPRESENTATIONS OR WARRANTIES ABOUT THE SUITABILITY OF THE
# SOFTWARE, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE, OR NON-INFRINGEMENT. FLIR SHALL NOT BE LIABLE FOR ANY DAMAGES
# SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR DISTRIBUTING
# THIS SOFTWARE OR ITS DERIVATIVES.
#=============================================================================
import PyCapture2
from sys import exit
def print_build_info():
lib_ver = PyCapture2.getLibraryVersion()
print('PyCapture2 library version: %d %d %d %d' % (lib_ver[0], lib_ver[1], lib_ver[2], lib_ver[3]))
print()
def print_camera_info(cam_info):
print('\n*** CAMERA INFORMATION ***\n')
print('Serial number - %d' % cam_info.serialNumber)
print('Camera model - %s' % cam_info.modelName)
print('Camera vendor - %s' % cam_info.vendorName)
print('Sensor - %s' % cam_info.sensorInfo)
print('Resolution - %s' % cam_info.sensorResolution)
print('Firmware version - %s' % cam_info.firmwareVersion)
print('Firmware build time - %s' % cam_info.firmwareBuildTime)
print()
print('GigE major version - %s' % cam_info.gigEMajorVersion)
print('GigE minor version - %s' % cam_info.gigEMinorVersion)
print('User-defined name - %s' % cam_info.userDefinedName)
print('XML URL1 - %s' % cam_info.xmlURL1)
print('XML URL2 - %s' % cam_info.xmlURL2)
print('MAC address - %d %d %d %d %d %d' % (cam_info.macAddress[0], cam_info.macAddress[1], cam_info.macAddress[2], cam_info.macAddress[3], cam_info.macAddress[4], cam_info.macAddress[5]))
print('IP address - %d %d %d %d' % (cam_info.ipAddress[0], cam_info.ipAddress[1], cam_info.ipAddress[2], cam_info.ipAddress[3]))
print('Subnet mask - %d %d %d %d' % (cam_info.subnetMask[0], cam_info.subnetMask[1], cam_info.subnetMask[2], cam_info.subnetMask[3]))
    print('Default gateway - %d %d %d %d' % (cam_info.defaultGateway[0], cam_info.defaultGateway[1], cam_info.defaultGateway[2], cam_info.defaultGateway[3]))
print()
def print_stream_channel_info(stream_info):
print('Network interface: %s' % stream_info.networkInterfaceIndex)
print('Host port: %s' % stream_info.hostPort)
    print('Do not fragment bit: %s' % ('Enabled' if stream_info.doNotFragment else 'Disabled'))
print('Packet size: %s' % stream_info.packetSize)
print('Inter-packet delay: %s' % stream_info.interPacketDelay)
print('Destination IP address: %d %d %d %d' % (stream_info.destinationIpAddress[0], stream_info.destinationIpAddress[1], stream_info.destinationIpAddress[2], stream_info.destinationIpAddress[3]))
    print('Source port (on Camera): %s' % stream_info.sourcePort)
print()
def enable_embedded_timestamp(cam, enable_timestamp):
embedded_info = cam.getEmbeddedImageInfo()
if embedded_info.available.timestamp:
cam.setEmbeddedImageInfo(timestamp = enable_timestamp)
if enable_timestamp:
print('\nTimeStamp is enabled.\n')
else:
print('\nTimeStamp is disabled.\n')
def run_single_camera(cam, uid):
print('Connecting to Camera...')
cam.connect(uid)
print_camera_info(cam.getCameraInfo())
for i in range(cam.getNumStreamChannels()):
print_stream_channel_info(cam.getGigEStreamChannelInfo(i))
print('Querying GigE image setting information...')
image_settings_info = cam.getGigEImageSettingsInfo()
image_settings = PyCapture2.GigEImageSettings()
image_settings.offsetX = 0
image_settings.offsetY = 0
image_settings.height = image_settings_info.maxHeight
image_settings.width = image_settings_info.maxWidth
image_settings.pixelFormat = PyCapture2.PIXEL_FORMAT.MONO8
print('Setting GigE image settings...')
cam.setGigEImageSettings(image_settings)
enable_embedded_timestamp(cam, True)
print('Starting image capture...')
cam.startCapture()
prev_ts = None
num_images_to_grab = 10
for i in range(num_images_to_grab):
try:
image = cam.retrieveBuffer()
except PyCapture2.Fc2error as fc2Err:
print('Error retrieving buffer : ', fc2Err)
continue
ts = image.getTimeStamp()
if prev_ts:
diff = (ts.cycleSeconds - prev_ts.cycleSeconds) * 8000 + (ts.cycleCount - prev_ts.cycleCount)
print('Timestamp [ %d %d ] - %d' % (ts.cycleSeconds, ts.cycleCount, diff))
prev_ts = ts
newimg = image.convert(PyCapture2.PIXEL_FORMAT.BGR)
print('Saving the last image to GigEGrabEx.png')
newimg.save('GigEGrabEx.png'.encode('utf-8'), PyCapture2.IMAGE_FILE_FORMAT.PNG)
cam.stopCapture()
enable_embedded_timestamp(cam, False)
cam.disconnect()
#
# Example Main
#
# Print PyCapture2 Library Information
print_build_info()
# Ensure sufficient cameras are found
bus = PyCapture2.BusManager()
cam_infos = bus.discoverGigECameras()
for ci in cam_infos:
print_camera_info(ci)
if not len(cam_infos):
print('No suitable GigE cameras found. Exiting...')
exit()
# Run example on all cameras
cam = PyCapture2.GigECamera()
for i in range(bus.getNumOfCameras()):
uid = bus.getCameraFromIndex(i)
interface_type = bus.getInterfaceTypeFromGuid(uid)
if interface_type == PyCapture2.INTERFACE_TYPE.GIGE:
run_single_camera(cam, uid)
input('Done! Press Enter to exit...\n')
|
py | b411e3896922c2dc783cca5dd4b6f1613b804274 | from output.models.sun_data.ctype.base_td.base_td00101m.base_td00101m4_xsd.base_td00101m4 import (
Test,
Test1,
Root,
)
__all__ = [
"Test",
"Test1",
"Root",
]
|
py | b411e3e28403b84f8fc82bd03966d8bee2ff52bf | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import Tkinter as tk
N = 0
def increment():
global N
N = N + 1
button["text"] = str(N)
root = tk.Tk()
button = tk.Button(root, text=str(N), width=12, command=increment)
button.pack()
root.mainloop()
|
py | b411e48a7928f446ef83556516b137648cc511d5 | #!/usr/bin/env python
"""Unit tests run as PYTHONPATH=../../.. python3 ./test_valve_stack.py."""
# Copyright (C) 2015 Research and Innovation Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2019 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import unittest
import ipaddress
import yaml
from ryu.lib import mac
from ryu.ofproto import ofproto_v1_3 as ofp
from faucet import valves_manager
from faucet import valve_of
from faucet.port import (
STACK_STATE_INIT, STACK_STATE_UP,
LACP_PORT_SELECTED, LACP_PORT_UNSELECTED)
from clib.fakeoftable import CONTROLLER_PORT
from clib.valve_test_lib import (
BASE_DP1_CONFIG, CONFIG, STACK_CONFIG, STACK_LOOP_CONFIG, ValveTestBases)
import networkx
class ValveEdgeVLANTestCase(ValveTestBases.ValveTestNetwork):
CONFIG1 = """
dps:
s1:
dp_id: 1
hardware: 'GenericTFM'
stack:
priority: 1
interfaces:
1:
stack:
dp: s2
port: 1
s2:
dp_id: 2
hardware: 'GenericTFM'
interfaces:
1:
stack:
dp: s1
port: 1
2:
stack:
dp: s3
port: 1
s3:
dp_id: 3
hardware: 'GenericTFM'
interfaces:
1:
stack:
dp: s2
port: 2
"""
CONFIG2 = """
dps:
s1:
dp_id: 1
hardware: 'GenericTFM'
stack:
priority: 1
interfaces:
1:
stack:
dp: s2
port: 1
s2:
dp_id: 2
hardware: 'GenericTFM'
interfaces:
1:
stack:
dp: s1
port: 1
2:
stack:
dp: s3
port: 1
s3:
dp_id: 3
hardware: 'GenericTFM'
interfaces:
1:
stack:
dp: s2
port: 2
2:
native_vlan: 100
3:
native_vlan: 100
"""
def setUp(self):
self.setup_valves(self.CONFIG1)
self.activate_stack()
def activate_stack(self):
self.activate_all_ports()
for valve in self.valves_manager.valves.values():
for port in valve.dp.ports.values():
if port.stack:
self.set_stack_port_up(port.number, valve)
def test_edge_vlan(self):
self.update_config(self.CONFIG2, reload_type=None)
self.activate_stack()
s1 = self.valves_manager.valves[1].dp
self.assertTrue(s1.is_stack_root())
self.assertFalse(s1.is_stack_edge())
s2 = self.valves_manager.valves[2].dp
self.assertFalse(s2.is_stack_root())
self.assertFalse(s2.is_stack_edge())
s3 = self.valves_manager.valves[3].dp
self.assertFalse(s3.is_stack_root())
self.assertTrue(s3.is_stack_edge())
match = {'in_port': 2, 'vlan_vid': 0, 'eth_src': self.P2_V100_MAC}
self.network.tables[3].is_output(match, port=3)
match = {'in_port': 3, 'vlan_vid': 0, 'eth_src': self.P2_V100_MAC}
self.network.tables[3].is_output(match, port=2)
class ValveStackMCLAGTestCase(ValveTestBases.ValveTestNetwork):
"""Test stacked MCLAG"""
CONFIG = """
dps:
s1:
%s
stack:
priority: 1
interfaces:
1:
description: p1
stack:
dp: s2
port: 1
2:
description: p2
native_vlan: 100
3:
description: p3
native_vlan: 100
lacp: 1
4:
description: p4
native_vlan: 100
lacp: 1
s2:
hardware: 'GenericTFM'
dp_id: 0x2
interfaces:
1:
description: p1
stack:
dp: s1
port: 1
2:
description: p2
native_vlan: 100
3:
description: p3
native_vlan: 100
lacp: 1
4:
description: p4
native_vlan: 100
lacp: 1
""" % BASE_DP1_CONFIG
def setUp(self):
"""Setup basic loop config"""
self.setup_valves(self.CONFIG)
def get_other_valves(self, valve):
"""Return other running valves"""
return self.valves_manager._other_running_valves(valve) # pylint: disable=protected-access
def test_dpid_nominations(self):
"""Test dpids are nominated correctly"""
self.activate_all_ports()
lacp_ports = {}
for valve in self.valves_manager.valves.values():
for port in valve.dp.ports.values():
if port.lacp:
lacp_ports.setdefault(valve.dp.dp_id, [])
lacp_ports[valve.dp.dp_id].append(port)
port.actor_up()
valve = self.valves_manager.valves[0x1]
other_valves = self.get_other_valves(valve)
# Equal number of LAG ports, choose root DP
nominated_dpid = valve.switch_manager.get_lacp_dpid_nomination(1, valve, other_valves)[0]
self.assertEqual(
nominated_dpid, 0x1,
'Expected nominated DPID %s but found %s' % (0x1, nominated_dpid))
# Choose DP with most UP LAG ports
lacp_ports[0x1][0].actor_nosync()
nominated_dpid = valve.switch_manager.get_lacp_dpid_nomination(1, valve, other_valves)[0]
self.assertEqual(
nominated_dpid, 0x2,
'Expected nominated DPID %s but found %s' % (0x2, nominated_dpid))
def test_no_dpid_nominations(self):
"""Test dpid nomination doesn't nominate when no LACP ports are up"""
self.activate_all_ports()
valve = self.valves_manager.valves[0x1]
other_valves = self.get_other_valves(valve)
# No actors UP so should return None
nominated_dpid = valve.switch_manager.get_lacp_dpid_nomination(1, valve, other_valves)[0]
self.assertEqual(
nominated_dpid, None,
'Did not expect to nominate DPID %s' % nominated_dpid)
# No other valves so should return None
for valve in self.valves_manager.valves.values():
for port in valve.dp.ports.values():
if port.lacp:
port.actor_up()
nominated_dpid = valve.switch_manager.get_lacp_dpid_nomination(1, valve, None)[0]
self.assertEqual(
nominated_dpid, None,
'Did not expect to nominate DPID %s' % nominated_dpid)
def test_nominated_dpid_port_selection(self):
"""Test a nominated port selection state is changed"""
self.activate_all_ports()
lacp_ports = {}
for valve in self.valves_manager.valves.values():
for port in valve.dp.ports.values():
if port.lacp:
lacp_ports.setdefault(valve, [])
lacp_ports[valve].append(port)
port.actor_up()
for valve, ports in lacp_ports.items():
other_valves = self.get_other_valves(valve)
for port in ports:
valve.lacp_update(port, True, 1, 1, other_valves)
# Testing accuracy of varz port_lacp_role
port_labels = {
'port': port.name,
'port_description': port.description,
'dp_name': valve.dp.name,
'dp_id': '0x%x' % valve.dp.dp_id
}
lacp_role = self.get_prom('port_lacp_role', labels=port_labels, bare=True)
self.assertEqual(
port.lacp_port_state(), lacp_role,
'Port %s DP %s role %s differs from varz value %s'
% (port, valve, port.lacp_port_state(), lacp_role))
if valve.dp.dp_id == 0x1:
self.assertEqual(
port.lacp_port_state(), LACP_PORT_SELECTED,
'Expected LACP port %s DP %s to be SELECTED' % (port, valve))
else:
self.assertEqual(
port.lacp_port_state(), LACP_PORT_UNSELECTED,
'Expected LACP port %s DP %s to be UNSELECTED' % (port, valve))
def test_lag_flood(self):
"""Test flooding is allowed for UP & SELECTED LAG links only"""
self.activate_all_ports()
main_valve = self.valves_manager.valves[0x1]
main_other_valves = self.get_other_valves(main_valve)
# Start with all LAG links INIT & UNSELECTED
self.validate_flood(2, 0, 3, False, 'Flooded out UNSELECTED & INIT LAG port')
self.validate_flood(2, 0, 4, False, 'Flooded out UNSELECTED & INIT LAG port')
# Set UP & SELECTED one s1 LAG link
port3 = main_valve.dp.ports[3]
port4 = main_valve.dp.ports[4]
self.apply_ofmsgs(main_valve.lacp_update(port4, True, 1, 1, main_other_valves))
self.apply_ofmsgs(main_valve.lacp_update(port3, False, 1, 1, main_other_valves))
self.validate_flood(2, 0, 3, False, 'Flooded out NOSYNC LAG port')
self.validate_flood(2, 0, 4, True, 'Did not flood out SELECTED LAG port')
# Set UP & SELECTED s2 LAG links
valve = self.valves_manager.valves[0x2]
other_valves = self.get_other_valves(valve)
for port in valve.dp.ports.values():
if port.lacp:
valve.lacp_update(port, True, 1, 1, other_valves)
self.apply_ofmsgs(main_valve.lacp_update(port4, True, 1, 1, main_other_valves))
self.apply_ofmsgs(main_valve.lacp_update(port3, False, 1, 1, main_other_valves))
self.validate_flood(2, 0, 3, False, 'Flooded out UNSELECTED & NOSYNC LAG port')
self.validate_flood(2, 0, 4, False, 'Flooded out UNSELECTED LAG port')
# Set UP & SELECTED both s1 LAG links
self.apply_ofmsgs(main_valve.lacp_update(port3, True, 1, 1, main_other_valves))
self.apply_ofmsgs(main_valve.lacp_update(port4, True, 1, 1, main_other_valves))
self.validate_flood(2, 0, 3, True, 'Did not flood out SELECTED LAG port')
self.validate_flood(2, 0, 4, False, 'Flooded out multiple LAG ports')
def test_lag_pipeline_accept(self):
"""Test packets entering through UP & SELECTED LAG links"""
self.activate_all_ports()
main_valve = self.valves_manager.valves[0x1]
main_other_valves = self.get_other_valves(main_valve)
# Packet initially rejected
self.validate_flood(
3, 0, None, False, 'Packet incoming through UNSELECTED & INIT port was accepted')
self.validate_flood(
4, 0, None, False, 'Packet incoming through UNSELECTED & INIT port was accepted')
# Set one s1 LAG port 4 to SELECTED & UP
port3 = main_valve.dp.ports[3]
port4 = main_valve.dp.ports[4]
self.apply_ofmsgs(main_valve.lacp_update(port4, True, 1, 1, main_other_valves))
self.apply_ofmsgs(main_valve.lacp_update(port3, False, 1, 1, main_other_valves))
self.validate_flood(
3, 0, None, False, 'Packet incoming through NOSYNC port was accepted')
self.validate_flood(
4, 0, None, True, 'Packet incoming through SELECTED port was not accepted')
# Set UP & SELECTED s2 LAG links, set one s1 port down
valve = self.valves_manager.valves[0x2]
other_valves = self.get_other_valves(valve)
for port in valve.dp.ports.values():
if port.lacp:
valve.lacp_update(port, True, 1, 1, other_valves)
self.apply_ofmsgs(main_valve.lacp_update(port4, True, 1, 1, main_other_valves))
self.apply_ofmsgs(main_valve.lacp_update(port3, False, 1, 1, main_other_valves))
self.validate_flood(
3, 0, None, False, 'Packet incoming through UNSELECTED & NOSYNC port was accepted')
self.validate_flood(
4, 0, None, False, 'Packet incoming through UNSELECTED port was accepted')
# Set UP & SELECTED both s1 LAG links
self.apply_ofmsgs(main_valve.lacp_update(port3, True, 1, 1, main_other_valves))
self.apply_ofmsgs(main_valve.lacp_update(port4, True, 1, 1, main_other_valves))
self.validate_flood(
3, 0, None, True, 'Packet incoming through SELECTED port was not accepted')
self.validate_flood(
4, 0, None, True, 'Packet incoming through SELECTED port was not accepted')
class ValveStackMCLAGRestartTestCase(ValveTestBases.ValveTestNetwork):
"""Test stacked MCLAG"""
CONFIG = """
dps:
s1:
%s
stack:
priority: 1
interfaces:
1:
description: p1
stack:
dp: s2
port: 1
2:
description: p2
native_vlan: 100
3:
description: p3
native_vlan: 100
lacp: 1
4:
description: p4
native_vlan: 100
lacp: 1
s2:
hardware: 'GenericTFM'
dp_id: 0x2
interfaces:
1:
description: p1
stack:
dp: s1
port: 1
2:
description: p2
native_vlan: 100
3:
description: p3
native_vlan: 100
lacp: 1
4:
description: p4
native_vlan: 100
lacp: 1
""" % BASE_DP1_CONFIG
def setUp(self):
"""Setup basic loop config"""
self.setup_valves(self.CONFIG)
def get_other_valves(self, valve):
"""Return other running valves"""
return self.valves_manager._other_running_valves(valve) # pylint: disable=protected-access
def test_mclag_cold_start(self):
"""Test cold-starting a switch with a downed port resets LACP states"""
self.activate_all_ports()
valve = self.valves_manager.valves[0x1]
other_valves = self.get_other_valves(valve)
port = valve.dp.ports[3]
# Make sure LACP state has been updated
self.assertTrue(valve.lacp_update(port, True, 1, 1, other_valves), 'No OFMSGS returned')
self.assertTrue(port.is_actor_up(), 'Actor not UP')
# Set port DOWN
valve.port_delete(3, other_valves=other_valves)
self.assertTrue(port.is_actor_none(), 'Actor not NONE')
# Restart switch & LACP port
self.cold_start()
self.assertTrue(valve.port_add(3), 'No OFMSGS returned')
# Successfully restart LACP from downed
self.assertTrue(valve.lacp_update(port, True, 1, 1, other_valves), 'No OFMSGS returned')
self.assertTrue(port.is_actor_up(), 'Actor not UP')
class ValveStackMCLAGStandbyTestCase(ValveTestBases.ValveTestNetwork):
"""Test MCLAG with standby port option overrules unselected states"""
CONFIG = """
dps:
s1:
%s
stack:
priority: 1
interfaces:
1:
description: p1
stack:
dp: s2
port: 1
2:
description: p3
native_vlan: 100
lacp_standby: True
lacp: 1
3:
description: p4
native_vlan: 100
lacp_standby: True
lacp: 1
s2:
hardware: 'GenericTFM'
dp_id: 0x2
interfaces:
1:
description: p1
stack:
dp: s1
port: 1
2:
description: p3
native_vlan: 100
lacp_standby: True
lacp: 1
3:
description: p4
native_vlan: 100
lacp_standby: True
lacp: 1
""" % BASE_DP1_CONFIG
def setUp(self):
"""Setup basic loop config"""
self.setup_valves(self.CONFIG)
def get_other_valves(self, valve):
"""Return other running valves"""
return self.valves_manager._other_running_valves(valve) # pylint: disable=protected-access
def test_mclag_standby_option(self):
"""Test MCLAG standby option forces standby state instead of unselected"""
self.activate_all_ports()
valve = self.valves_manager.valves[0x1]
other_valve = self.valves_manager.valves[0x2]
for port in valve.dp.ports.values():
if port.lacp:
valve.lacp_update(port, True, 1, 1, self.get_other_valves(valve))
self.assertTrue(port.is_port_selected())
for port in other_valve.dp.ports.values():
if port.lacp:
other_valve.lacp_update(port, True, 1, 1, self.get_other_valves(other_valve))
self.assertTrue(port.is_port_standby())
for port in valve.dp.ports.values():
if port.lacp:
valve.lacp_update(port, False, 1, 1, self.get_other_valves(valve))
self.assertTrue(port.is_port_standby())
for port in other_valve.dp.ports.values():
if port.lacp:
other_valve.lacp_update(port, True, 1, 1, self.get_other_valves(other_valve))
self.assertTrue(port.is_port_selected())
class ValveStackRootExtLoopProtectTestCase(ValveTestBases.ValveTestNetwork):
"""External loop protect test cases"""
CONFIG = """
dps:
s1:
%s
stack:
priority: 1
interfaces:
1:
description: p1
stack:
dp: s2
port: 1
2:
description: p2
native_vlan: 100
3:
description: p3
native_vlan: 100
loop_protect_external: True
4:
description: p4
native_vlan: 100
loop_protect_external: True
s2:
hardware: 'GenericTFM'
dp_id: 0x2
interfaces:
1:
description: p1
stack:
dp: s1
port: 1
2:
description: p2
native_vlan: 100
3:
description: p3
native_vlan: 100
loop_protect_external: True
4:
description: p4
native_vlan: 100
loop_protect_external: True
""" % BASE_DP1_CONFIG
def setUp(self):
self.setup_valves(self.CONFIG)
self.set_stack_port_up(1)
def test_loop_protect(self):
"""test basic loop protection"""
mcast_match = {
'in_port': 2,
'eth_dst': mac.BROADCAST_STR,
'vlan_vid': 0,
'eth_type': 0x800,
'ipv4_dst': '224.0.0.5',
}
table = self.network.tables[self.DP_ID]
self.assertTrue(
table.is_output(mcast_match, port=1),
msg='mcast packet not flooded to non-root stack')
self.assertTrue(
table.is_output(mcast_match, port=3),
msg='mcast packet not flooded locally on root')
self.assertFalse(
table.is_output(mcast_match, port=4),
msg='mcast packet multiply flooded externally on root')
class ValveStackChainTest(ValveTestBases.ValveTestNetwork):
"""Test base class for loop stack config"""
CONFIG = STACK_CONFIG
DP = 's2'
DP_ID = 2
def setUp(self):
"""Setup basic loop config"""
self.setup_valves(self.CONFIG)
def learn_stack_hosts(self):
"""Learn some hosts."""
for _ in range(2):
self.rcv_packet(3, 0, self.pkt_match(1, 2), dp_id=1)
self.rcv_packet(1, 0, self.pkt_match(1, 2), dp_id=2)
self.rcv_packet(4, 0, self.pkt_match(2, 1), dp_id=2)
self.rcv_packet(1, 0, self.pkt_match(2, 1), dp_id=1)
self.rcv_packet(1, 0, self.pkt_match(3, 2), dp_id=3)
self.rcv_packet(3, 0, self.pkt_match(3, 2), dp_id=2)
def _unicast_to(self, out_port, trace=False):
ucast_match = {
'in_port': 4,
'eth_src': self.P2_V100_MAC,
'eth_dst': self.P1_V100_MAC,
'vlan_vid': 0,
'eth_type': 0x800,
}
table = self.network.tables[self.DP_ID]
return table.is_output(ucast_match, port=out_port, trace=trace)
def _learning_from_bcast(self, in_port):
ucast_match = {
'in_port': in_port,
'eth_src': self.P1_V100_MAC,
'eth_dst': self.BROADCAST_MAC,
'vlan_vid': self.V100,
'eth_type': 0x800,
}
table = self.network.tables[self.DP_ID]
return table.is_output(ucast_match, port=CONTROLLER_PORT)
def validate_edge_learn_ports(self):
"""Validate the switch behavior before learning, and then learn hosts"""
# Before learning, unicast should flood to stack root and packet-in.
self.assertFalse(self._unicast_to(1), 'unlearned unicast to stack root')
self.assertFalse(self._unicast_to(2), 'unlearned unicast to stack root')
self.assertTrue(self._unicast_to(3), 'unlearned unicast away from stack root')
self.assertTrue(self._unicast_to(CONTROLLER_PORT), 'unlearned unicast learn')
self.assertFalse(self._learning_from_bcast(1), 'learn from stack root broadcast')
self.assertFalse(self._learning_from_bcast(4), 'learn from access port broadcast')
self.learn_stack_hosts()
self.assertFalse(self._unicast_to(1), 'learned unicast to stack root')
self.assertFalse(self._unicast_to(2), 'learned unicast to stack root')
self.assertTrue(self._unicast_to(3), 'learned unicast away from stack root')
self.assertFalse(self._unicast_to(CONTROLLER_PORT), 'no learn from unicast')
self.assertFalse(self._learning_from_bcast(1), 'learn from stack root broadcast')
self.assertFalse(self._learning_from_bcast(4), 'learn from access port broadcast')
def test_stack_learn_edge(self):
"""Test stack learned edge"""
self.activate_all_ports()
self.validate_edge_learn_ports()
def test_stack_learn_not_root(self):
"""Test stack learned when not root"""
self.update_config(self._config_edge_learn_stack_root(False), reload_type='warm')
self.activate_all_ports()
self.validate_edge_learn_ports()
class ValveStackLoopTest(ValveTestBases.ValveTestNetwork):
"""Test base class for loop stack config"""
CONFIG = STACK_LOOP_CONFIG
def setUp(self):
"""Setup basic loop config"""
self.setup_valves(self.CONFIG)
def validate_flooding(self, rerouted=False, portup=True):
"""Validate the flooding state of the stack"""
vid = self.V100
self.validate_flood(1, vid, 1, False, 'flooded out input stack port')
self.validate_flood(1, vid, 2, portup, 'not flooded to stack root')
self.validate_flood(1, vid, 3, portup, 'not flooded to external host')
self.validate_flood(2, vid, 1, rerouted, 'flooded out other stack port')
self.validate_flood(2, vid, 2, False, 'flooded out input stack port')
self.validate_flood(2, vid, 3, True, 'not flooded to external host')
vid = 0
self.validate_flood(3, vid, 1, rerouted, 'flooded out inactive port')
self.validate_flood(3, vid, 2, True, 'not flooded to stack root')
self.validate_flood(3, vid, 3, False, 'flooded out hairpin')
def learn_stack_hosts(self):
"""Learn some hosts."""
for _ in range(2):
self.rcv_packet(3, 0, self.pkt_match(1, 2), dp_id=1)
self.rcv_packet(2, 0, self.pkt_match(1, 2), dp_id=2)
self.rcv_packet(3, 0, self.pkt_match(2, 1), dp_id=2)
self.rcv_packet(2, 0, self.pkt_match(2, 1), dp_id=1)
class ValveStackEdgeLearnTestCase(ValveStackLoopTest):
"""Edge learning test cases"""
def _unicast_to(self, out_port):
ucast_match = {
'in_port': 3,
'eth_src': self.P1_V100_MAC,
'eth_dst': self.P2_V100_MAC,
'vlan_vid': 0,
'eth_type': 0x800,
}
table = self.network.tables[self.DP_ID]
return table.is_output(ucast_match, port=out_port)
def _learning_from_bcast(self, in_port):
bcast_match = {
'in_port': in_port,
'eth_src': self.P2_V100_MAC,
'eth_dst': self.BROADCAST_MAC,
'vlan_vid': self.V100,
'eth_type': 0x800,
}
table = self.network.tables[self.DP_ID]
return table.is_output(bcast_match, port=CONTROLLER_PORT)
def validate_edge_learn_ports(self):
"""Validate the switch behavior before learning, and then learn hosts"""
# Before learning, unicast should flood to stack root and packet-in.
self.assertFalse(self._unicast_to(1), 'unicast direct to edge')
self.assertTrue(self._unicast_to(2), 'unicast to stack root')
self.assertTrue(self._unicast_to(CONTROLLER_PORT), 'learn from unicast')
self.assertTrue(self._learning_from_bcast(2), 'learn from stack root broadcast')
self.learn_stack_hosts()
self.assertFalse(self._unicast_to(CONTROLLER_PORT), 'learn from unicast')
def test_edge_learn_edge_port(self):
"""Check the behavior of the basic edge_learn_port algorithm"""
self.update_config(self._config_edge_learn_stack_root(False), reload_type='warm')
self.activate_all_ports()
self.validate_edge_learn_ports()
# After learning, unicast should go direct to edge switch.
self.assertTrue(self._unicast_to(1), 'unicast direct to edge')
self.assertFalse(self._unicast_to(2), 'unicast to stack root')
# TODO: This should be False to prevent unnecessary packet-ins.
self.assertTrue(self._learning_from_bcast(2), 'learn from stack root broadcast')
def test_edge_learn_stack_root(self):
"""Check the behavior of learning always towards stack root"""
self.activate_all_ports()
self.validate_edge_learn_ports()
# After learning, unicast should go to stack root, and no more learning from root.
self.assertFalse(self._unicast_to(1), 'unicast direct to edge')
self.assertTrue(self._unicast_to(2), 'unicast to stack root')
self.assertFalse(self._learning_from_bcast(2), 'learn from stack root broadcast')
class ValveStackRedundantLink(ValveStackLoopTest):
"""Check stack situations with a redundant link"""
def test_loop_protect(self):
"""Basic loop protection check"""
self.activate_all_ports()
mcast_match = {
'in_port': 3,
'eth_dst': mac.BROADCAST_STR,
'vlan_vid': 0,
'eth_type': 0x800,
'ipv4_dst': '224.0.0.5',
}
table = self.network.tables[self.DP_ID]
valve = self.valves_manager.valves[self.DP_ID]
self.assertTrue(
table.is_output(mcast_match, port=2),
msg='mcast packet not flooded to root of stack')
self.assertFalse(valve.dp.ports[2].non_stack_forwarding())
self.assertFalse(
table.is_output(mcast_match, port=1),
msg='mcast packet flooded root of stack via not shortest path')
self.deactivate_stack_port(valve.dp.ports[2])
self.assertFalse(valve.dp.ports[2].non_stack_forwarding())
self.assertFalse(
table.is_output(mcast_match, port=2),
msg='mcast packet flooded to root of stack via redundant path')
self.assertFalse(valve.dp.ports[2].non_stack_forwarding())
self.assertTrue(
table.is_output(mcast_match, port=1),
msg='mcast packet not flooded root of stack')
self.assertFalse(valve.dp.ports[2].non_stack_forwarding())
self.assertTrue(valve.dp.ports[3].non_stack_forwarding())
class ValveStackNonRootExtLoopProtectTestCase(ValveTestBases.ValveTestNetwork):
"""Test non-root external loop protect"""
CONFIG = """
dps:
s1:
%s
interfaces:
1:
description: p1
stack:
dp: s2
port: 1
2:
description: p2
native_vlan: 100
3:
description: p3
native_vlan: 100
loop_protect_external: True
4:
description: p4
native_vlan: 100
loop_protect_external: True
s2:
hardware: 'GenericTFM'
dp_id: 0x2
interfaces:
1:
description: p1
stack:
dp: s1
port: 1
2:
description: p2
stack:
dp: s3
port: 1
3:
description: p2
native_vlan: 100
s3:
hardware: 'GenericTFM'
dp_id: 0x3
stack:
priority: 1
interfaces:
1:
description: p1
stack:
dp: s2
port: 2
2:
description: p2
native_vlan: 100
""" % BASE_DP1_CONFIG
def setUp(self):
self.setup_valves(self.CONFIG)
self.set_stack_port_up(1)
def test_loop_protect(self):
"""Test expected table outputs for external loop protect"""
mcast_match = {
'in_port': 2,
'eth_dst': mac.BROADCAST_STR,
'vlan_vid': 0,
'eth_type': 0x800,
'ipv4_dst': '224.0.0.5',
}
table = self.network.tables[self.DP_ID]
self.assertTrue(
table.is_output(mcast_match, port=1),
msg='mcast packet not flooded to root of stack')
self.assertFalse(
table.is_output(mcast_match, port=3),
msg='mcast packet flooded locally on non-root')
self.assertFalse(
table.is_output(mcast_match, port=4),
msg='mcast packet flooded locally on non-root')
class ValveStackAndNonStackTestCase(ValveTestBases.ValveTestNetwork):
"""Test stacked switches can exist with non-stacked switches"""
CONFIG = """
dps:
s1:
%s
stack:
priority: 1
interfaces:
1:
description: p1
stack:
dp: s2
port: 1
2:
description: p2
native_vlan: 0x100
s2:
hardware: 'GenericTFM'
dp_id: 0x2
interfaces:
1:
description: p1
stack:
dp: s1
port: 1
2:
description: p2
native_vlan: 0x100
s3:
hardware: 'GenericTFM'
dp_id: 0x3
interfaces:
1:
description: p1
native_vlan: 0x100
2:
description: p2
native_vlan: 0x100
""" % BASE_DP1_CONFIG
def setUp(self):
self.setup_valves(self.CONFIG)
def test_nonstack_dp_port(self):
"""Test that finding a path from a stack swithc to a non-stack switch cannot happen"""
self.assertEqual(None, self.valves_manager.valves[0x3].dp.shortest_path_port('s1'))
class ValveStackRedundancyTestCase(ValveTestBases.ValveTestNetwork):
"""Valve test for root selection."""
CONFIG = STACK_CONFIG
def setUp(self):
self.setup_valves(self.CONFIG)
def dp_by_name(self, dp_name):
"""Get DP by DP name"""
for valve in self.valves_manager.valves.values():
if valve.dp.name == dp_name:
return valve.dp
return None
def set_stack_all_ports_status(self, dp_name, status):
"""Set all stack ports to status on dp"""
dp = self.dp_by_name(dp_name)
for port in dp.stack_ports:
port.dyn_stack_current_state = status
def test_redundancy(self):
"""Test redundant stack connections"""
now = 1
self.trigger_stack_ports()
# All switches are down to start with.
for dpid in self.valves_manager.valves:
dp = self.valves_manager.valves[dpid].dp
dp.dyn_running = False
self.set_stack_all_ports_status(dp.name, STACK_STATE_INIT)
for valve in self.valves_manager.valves.values():
self.assertFalse(valve.dp.dyn_running)
self.assertEqual('s1', valve.dp.stack_root_name)
root_hop_port = valve.dp.shortest_path_port('s1')
root_hop_port = root_hop_port.number if root_hop_port else 0
self.assertEqual(root_hop_port, self.get_prom('dp_root_hop_port', dp_id=valve.dp.dp_id))
# From a cold start - we pick the s1 as root.
self.assertEqual(None, self.valves_manager.meta_dp_state.stack_root_name)
self.assertFalse(self.valves_manager.maintain_stack_root(now))
self.assertEqual('s1', self.valves_manager.meta_dp_state.stack_root_name)
self.assertEqual(1, self.get_prom('faucet_stack_root_dpid', bare=True))
self.assertTrue(self.get_prom('is_dp_stack_root', dp_id=1))
self.assertFalse(self.get_prom('is_dp_stack_root', dp_id=2))
now += (valves_manager.STACK_ROOT_DOWN_TIME * 2)
# Time passes, still no change, s1 is still the root.
self.assertFalse(self.valves_manager.maintain_stack_root(now))
self.assertEqual('s1', self.valves_manager.meta_dp_state.stack_root_name)
self.assertEqual(1, self.get_prom('faucet_stack_root_dpid', bare=True))
self.assertTrue(self.get_prom('is_dp_stack_root', dp_id=1))
self.assertFalse(self.get_prom('is_dp_stack_root', dp_id=2))
        # s2 has come up, but all of its stack ports are down, and s1 is still down.
self.valves_manager.meta_dp_state.dp_last_live_time['s2'] = now
now += (valves_manager.STACK_ROOT_STATE_UPDATE_TIME * 2)
# No change because s2 still isn't healthy.
self.assertFalse(self.valves_manager.maintain_stack_root(now))
# We expect s2 to be the new root because now it has stack links up.
self.set_stack_all_ports_status('s2', STACK_STATE_UP)
now += (valves_manager.STACK_ROOT_STATE_UPDATE_TIME * 2)
self.valves_manager.meta_dp_state.dp_last_live_time['s2'] = now
self.assertTrue(self.valves_manager.maintain_stack_root(now))
self.assertEqual('s2', self.valves_manager.meta_dp_state.stack_root_name)
self.assertEqual(2, self.get_prom('faucet_stack_root_dpid', bare=True))
self.assertFalse(self.get_prom('is_dp_stack_root', dp_id=1))
self.assertTrue(self.get_prom('is_dp_stack_root', dp_id=2))
# More time passes, s1 is still down, s2 is still the root.
now += (valves_manager.STACK_ROOT_DOWN_TIME * 2)
# s2 recently said something, s2 still the root.
self.valves_manager.meta_dp_state.dp_last_live_time['s2'] = now - 1
self.set_stack_all_ports_status('s2', STACK_STATE_UP)
self.assertFalse(self.valves_manager.maintain_stack_root(now))
self.assertEqual('s2', self.valves_manager.meta_dp_state.stack_root_name)
self.assertEqual(2, self.get_prom('faucet_stack_root_dpid', bare=True))
self.assertFalse(self.get_prom('is_dp_stack_root', dp_id=1))
self.assertTrue(self.get_prom('is_dp_stack_root', dp_id=2))
# now s1 came up too, but we stay on s2 because it's healthy.
self.valves_manager.meta_dp_state.dp_last_live_time['s1'] = now + 1
now += valves_manager.STACK_ROOT_STATE_UPDATE_TIME
self.assertFalse(self.valves_manager.maintain_stack_root(now))
self.assertEqual('s2', self.valves_manager.meta_dp_state.stack_root_name)
self.assertEqual(2, self.get_prom('faucet_stack_root_dpid', bare=True))
self.assertFalse(self.get_prom('is_dp_stack_root', dp_id=1))
self.assertTrue(self.get_prom('is_dp_stack_root', dp_id=2))
class ValveRootStackTestCase(ValveTestBases.ValveTestNetwork):
"""Test stacking/forwarding."""
DP = 's3'
DP_ID = 0x3
def setUp(self):
self.setup_valves(CONFIG)
self.set_stack_port_up(5)
def test_stack_learn(self):
"""Test host learning on stack root."""
self.prom_inc(
partial(self.rcv_packet, 1, 0x300, {
'eth_src': self.P1_V300_MAC,
'eth_dst': self.UNKNOWN_MAC,
'ipv4_src': '10.0.0.1',
'ipv4_dst': '10.0.0.2'}),
'vlan_hosts_learned',
labels={'vlan': str(int(0x300))})
def test_stack_flood(self):
"""Test packet flooding when stacking."""
matches = [
{
'in_port': 1,
'vlan_vid': 0,
'eth_src': self.P1_V300_MAC
}]
self.verify_flooding(matches)
def test_stack_off_on(self):
SIMPLE_DP_CONFIG = """
dps:
s3:
dp_id: 3
hardware: Open vSwitch
interfaces:
1:
native_vlan: 100
"""
self.update_config(SIMPLE_DP_CONFIG, reload_expected=True)
dp = self.valves_manager.valves[self.DP_ID].dp
self.assertFalse(dp.is_stack_root())
self.update_config(CONFIG, reload_expected=True)
self.set_stack_port_up(5)
dp = self.valves_manager.valves[self.DP_ID].dp
self.assertTrue(dp.is_stack_root())
def test_topo(self):
"""Test DP is assigned appropriate edge/root states"""
dp = self.valves_manager.valves[self.DP_ID].dp
self.assertTrue(dp.is_stack_root())
self.assertFalse(dp.is_stack_edge())
class ValveEdgeStackTestCase(ValveTestBases.ValveTestNetwork):
"""Test stacking/forwarding."""
DP = 's4'
DP_ID = 0x4
def setUp(self):
self.setup_valves(CONFIG)
self.set_stack_port_up(5)
def test_stack_learn(self):
"""Test host learning on non-root switch."""
self.rcv_packet(1, 0x300, {
'eth_src': self.P1_V300_MAC,
'eth_dst': self.UNKNOWN_MAC,
'ipv4_src': '10.0.0.1',
'ipv4_dst': '10.0.0.2'})
self.rcv_packet(5, 0x300, {
'eth_src': self.P1_V300_MAC,
'eth_dst': self.UNKNOWN_MAC,
'vid': 0x300,
'ipv4_src': '10.0.0.1',
'ipv4_dst': '10.0.0.2'})
def test_stack_flood(self):
"""Test packet flooding when stacking."""
matches = [
{
'in_port': 1,
'vlan_vid': 0,
'eth_src': self.P1_V300_MAC
}]
self.verify_flooding(matches)
def test_no_unexpressed_packetin(self):
"""Test host learning on stack root."""
unexpressed_vid = 0x666 | ofp.OFPVID_PRESENT
match = {
'vlan_vid': unexpressed_vid,
'eth_dst': self.UNKNOWN_MAC}
table = self.network.tables[self.DP_ID]
self.assertFalse(
table.is_output(match, port=ofp.OFPP_CONTROLLER, vid=unexpressed_vid))
def test_topo(self):
"""Test DP is assigned appropriate edge/root states"""
dp = self.valves_manager.valves[self.DP_ID].dp
self.assertFalse(dp.is_stack_root())
self.assertTrue(dp.is_stack_edge())
class ValveStackProbeTestCase(ValveTestBases.ValveTestNetwork):
"""Test stack link probing."""
CONFIG = STACK_CONFIG
def setUp(self):
self.setup_valves(self.CONFIG)
def test_stack_probe(self):
"""Test probing works correctly."""
valve = self.valves_manager.valves[self.DP_ID]
stack_port = valve.dp.ports[1]
other_dp = self.valves_manager.valves[2].dp
other_port = other_dp.ports[1]
other_valves = self.valves_manager._other_running_valves(valve) # pylint: disable=protected-access
self.assertTrue(stack_port.is_stack_none())
valve.fast_state_expire(self.mock_time(), other_valves)
self.assertTrue(stack_port.is_stack_init())
for change_func, check_func in [
('stack_up', 'is_stack_up')]:
getattr(other_port, change_func)()
self.rcv_lldp(stack_port, other_dp, other_port)
self.assertTrue(getattr(stack_port, check_func)(), msg=change_func)
def test_stack_miscabling(self):
"""Test probing stack with miscabling."""
valve = self.valves_manager.valves[self.DP_ID]
stack_port = valve.dp.ports[1]
other_dp = self.valves_manager.valves[2].dp
other_port = other_dp.ports[1]
wrong_port = other_dp.ports[2]
wrong_dp = self.valves_manager.valves[3].dp
other_valves = self.valves_manager._other_running_valves(valve) # pylint: disable=protected-access
valve.fast_state_expire(self.mock_time(), other_valves)
for remote_dp, remote_port in [
(wrong_dp, other_port),
(other_dp, wrong_port)]:
self.rcv_lldp(stack_port, other_dp, other_port)
self.assertTrue(stack_port.is_stack_up())
self.rcv_lldp(stack_port, remote_dp, remote_port)
self.assertTrue(stack_port.is_stack_bad())
def test_stack_lost_lldp(self):
"""Test stacking when LLDP packets get dropped"""
valve = self.valves_manager.valves[self.DP_ID]
stack_port = valve.dp.ports[1]
other_dp = self.valves_manager.valves[2].dp
other_port = other_dp.ports[1]
other_valves = self.valves_manager._other_running_valves(valve) # pylint: disable=protected-access
valve.fast_state_expire(self.mock_time(), other_valves)
self.rcv_lldp(stack_port, other_dp, other_port)
self.assertTrue(stack_port.is_stack_up())
# simulate packet loss
valve.fast_state_expire(self.mock_time(300), other_valves)
self.assertTrue(stack_port.is_stack_gone())
valve.fast_state_expire(self.mock_time(300), other_valves)
self.rcv_lldp(stack_port, other_dp, other_port)
self.assertTrue(stack_port.is_stack_up())
class ValveStackGraphUpdateTestCase(ValveTestBases.ValveTestNetwork):
"""Valve test for updating the stack graph."""
CONFIG = STACK_CONFIG
def setUp(self):
self.setup_valves(self.CONFIG)
def test_update_stack_graph(self):
"""Test stack graph port UP and DOWN updates"""
def verify_stack_learn_edges(num_edges, edge=None, test_func=None):
for dpid in (1, 2, 3):
valve = self.valves_manager.valves[dpid]
if not valve.dp.stack:
continue
graph = valve.dp.stack_graph
self.assertEqual(num_edges, len(graph.edges()))
if test_func and edge:
test_func(edge in graph.edges(keys=True))
num_edges = 3
self.all_stack_up()
verify_stack_learn_edges(num_edges)
valve = self.valves_manager.valves[self.DP_ID]
ports = [valve.dp.ports[1], valve.dp.ports[2]]
edges = [('s1', 's2', 's1:1-s2:1'), ('s1', 's2', 's1:2-s2:2')]
for port, edge in zip(ports, edges):
num_edges -= 1
self.down_stack_port(port)
verify_stack_learn_edges(num_edges, edge, self.assertFalse)
self.up_stack_port(ports[0])
verify_stack_learn_edges(2, edges[0], self.assertTrue)
class ValveStackGraphBreakTestCase(ValveStackLoopTest):
"""Valve test for updating the stack graph."""
def test_update_stack_graph(self):
"""Test stack graph port UP and DOWN updates"""
self.activate_all_ports()
self.validate_flooding(False)
table = self.network.tables[self.DP_ID]
self.assertLessEqual(table.flow_count(), 33, 'table overflow')
# Deactivate link between the two other switches, not the one under test.
other_dp = self.valves_manager.valves[2].dp
other_port = other_dp.ports[2]
self.deactivate_stack_port(other_port)
self.validate_flooding(rerouted=True)
def _set_max_lldp_lost(self, new_value):
"""Set the interface config option max_lldp_lost"""
config = yaml.load(self.CONFIG, Loader=yaml.SafeLoader)
for dp in config['dps'].values():
for interface in dp['interfaces'].values():
if 'stack' in interface:
interface['max_lldp_lost'] = new_value
return yaml.dump(config)
def test_max_lldp_timeout(self):
"""Check that timeout can be increased"""
valve = self.valves_manager.valves[self.DP_ID]
port = valve.dp.ports[1]
self.activate_all_ports()
self.validate_flooding()
# Deactivating the port stops simulating LLDP beacons.
self.deactivate_stack_port(port, packets=1)
# Should still work after only 1 interval (3 required by default)
self.validate_flooding()
# Wait for 3 more cycles, so should fail now.
self.trigger_all_ports(packets=3)
# Validate expected normal behavior with the port down.
self.validate_flooding(portup=False)
# Restore everything and set max_lldp_lost to 100.
self.activate_stack_port(port)
self.validate_flooding()
new_config = self._set_max_lldp_lost(100)
self.update_config(new_config, reload_expected=False, no_reload_no_table_change=False)
self.activate_all_ports()
self.validate_flooding()
# Like above, deactivate the port (stops LLDP beacons).
self.deactivate_stack_port(port, packets=10)
# After 10 packets (more than before), it should still work.
self.validate_flooding()
        # But after 100 more, the port should be down because the limit is set to 100.
self.trigger_all_ports(packets=100)
self.validate_flooding(portup=False)
class ValveTestIPV4StackedRouting(ValveTestBases.ValveTestStackedRouting):
"""Test inter-vlan routing with stacking capabilities in an IPV4 network"""
VLAN100_FAUCET_VIPS = '10.0.1.254'
VLAN100_FAUCET_VIP_SPACE = '10.0.1.254/24'
VLAN200_FAUCET_VIPS = '10.0.2.254'
VLAN200_FAUCET_VIP_SPACE = '10.0.2.254/24'
def setUp(self):
self.setup_stack_routing()
class ValveTestIPV4StackedRoutingDPOneVLAN(ValveTestBases.ValveTestStackedRouting):
"""Test stacked intervlan routing when each DP has only one of the routed VLANs"""
VLAN100_FAUCET_VIPS = '10.0.1.254'
VLAN100_FAUCET_VIP_SPACE = '10.0.1.254/24'
VLAN200_FAUCET_VIPS = '10.0.2.254'
VLAN200_FAUCET_VIP_SPACE = '10.0.2.254/24'
NUM_PORTS = 64
def base_config(self):
"""Create the base config"""
self.V100_HOSTS = [1]
self.V200_HOSTS = [2]
return """
routers:
router1:
vlans: [vlan100, vlan200]
dps:
s1:
hardware: 'GenericTFM'
dp_id: 1
stack: {priority: 1}
interfaces:
1:
native_vlan: vlan100
3:
stack: {dp: s2, port: 3}
interface_ranges:
4-64:
native_vlan: vlan100
s2:
dp_id: 2
hardware: 'GenericTFM'
interfaces:
2:
native_vlan: vlan200
3:
stack: {dp: s1, port: 3}
"""
def setUp(self):
self.setup_stack_routing()
class ValveTestIPV4StackedRoutingPathNoVLANS(ValveTestBases.ValveTestStackedRouting):
"""Test stacked intervlan routing when DP in path contains no routed VLANs"""
VLAN100_FAUCET_VIPS = '10.0.1.254'
VLAN100_FAUCET_VIP_SPACE = '10.0.1.254/24'
VLAN200_FAUCET_VIPS = '10.0.2.254'
VLAN200_FAUCET_VIP_SPACE = '10.0.2.254/24'
def create_config(self):
"""Create the config file"""
self.CONFIG = """
vlans:
vlan100:
vid: 0x100
faucet_mac: '%s'
faucet_vips: ['%s']
vlan200:
vid: 0x200
faucet_mac: '%s'
faucet_vips: ['%s']
vlan300:
vid: 0x300
%s
""" % (self.VLAN100_FAUCET_MAC, self.VLAN100_FAUCET_VIP_SPACE,
self.VLAN200_FAUCET_MAC, self.VLAN200_FAUCET_VIP_SPACE,
self.base_config())
def base_config(self):
"""Create the base config"""
self.V100_HOSTS = [1]
self.V200_HOSTS = [3]
return """
routers:
router1:
vlans: [vlan100, vlan200]
dps:
s1:
hardware: 'GenericTFM'
dp_id: 1
stack: {priority: 1}
interfaces:
1:
native_vlan: vlan100
3:
stack: {dp: s2, port: 3}
s2:
dp_id: 2
hardware: 'GenericTFM'
interfaces:
2:
native_vlan: vlan300
3:
stack: {dp: s1, port: 3}
4:
stack: {dp: s3, port: 3}
s3:
dp_id: 3
hardware: 'GenericTFM'
interfaces:
2:
native_vlan: vlan200
3:
stack: {dp: s2, port: 4}
4:
stack: {dp: s4, port: 3}
s4:
dp_id: 4
hardware: 'GenericTFM'
interfaces:
2:
native_vlan: vlan300
3:
stack: {dp: s3, port: 4}
"""
def setUp(self):
self.setup_stack_routing()
class ValveTestIPV6StackedRouting(ValveTestBases.ValveTestStackedRouting):
"""Test inter-vlan routing with stacking capabilities in an IPV6 network"""
VLAN100_FAUCET_VIPS = 'fc80::1:254'
VLAN200_FAUCET_VIPS = 'fc80::2:254'
VLAN100_FAUCET_VIP_SPACE = 'fc80::1:254/64'
VLAN200_FAUCET_VIP_SPACE = 'fc80::1:254/64'
def setUp(self):
self.setup_stack_routing()
@staticmethod
def create_ip(vindex, host):
"""Create a IP address string"""
return 'fc80::%u:%u' % (vindex, host)
@staticmethod
def get_eth_type():
"""Returns IPV6 ether type"""
return valve_of.ether.ETH_TYPE_IPV6
def create_match(self, vindex, host, faucet_mac, faucet_vip, code):
"""Create an NA message"""
return {
'eth_src': self.create_mac(vindex, host),
'eth_dst': faucet_mac,
'ipv6_src': self.create_ip(vindex, host),
'ipv6_dst': faucet_vip,
'neighbor_advert_ip': self.create_ip(vindex, host)
}
class ValveInterVLANStackFlood(ValveTestBases.ValveTestNetwork):
"""Test that the stack ports get flooded to for interVLAN packets"""
VLAN100_FAUCET_MAC = '00:00:00:00:00:11'
VLAN200_FAUCET_MAC = '00:00:00:00:00:22'
VLAN100_FAUCET_VIPS = '10.1.0.254'
VLAN100_FAUCET_VIP_SPACE = '10.1.0.254/24'
VLAN200_FAUCET_VIPS = '10.2.0.254'
VLAN200_FAUCET_VIP_SPACE = '10.2.0.254/24'
DST_ADDRESS = ipaddress.IPv4Address('10.1.0.1')
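    # Topology note: base_config() below builds a chain s1 - s2 - s3 - s4 with s2 as the stack root;
    # each DP has one vlan100 and one vlan200 access port, so interVLAN floods must use stack ports
    # towards or away from the root, as asserted in the tests below.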
def base_config(self):
"""Create the base config"""
return """
routers:
router1:
vlans: [vlan100, vlan200]
dps:
s1:
hardware: 'GenericTFM'
dp_id: 1
interfaces:
1:
native_vlan: vlan100
2:
native_vlan: vlan200
3:
stack: {dp: s2, port: 3}
s2:
dp_id: 2
hardware: 'GenericTFM'
stack: {priority: 1}
interfaces:
1:
native_vlan: vlan100
2:
native_vlan: vlan200
3:
stack: {dp: s1, port: 3}
4:
stack: {dp: s3, port: 3}
s3:
dp_id: 3
hardware: 'GenericTFM'
interfaces:
1:
native_vlan: vlan100
2:
native_vlan: vlan200
3:
stack: {dp: s2, port: 4}
4:
stack: {dp: s4, port: 3}
s4:
dp_id: 4
hardware: 'GenericTFM'
interfaces:
1:
native_vlan: vlan100
2:
native_vlan: vlan200
3:
stack: {dp: s3, port: 4}
"""
def create_config(self):
"""Create the config file"""
self.CONFIG = """
vlans:
vlan100:
vid: 100
faucet_mac: '%s'
faucet_vips: ['%s']
vlan200:
vid: 200
faucet_mac: '%s'
faucet_vips: ['%s']
%s
""" % (self.VLAN100_FAUCET_MAC, self.VLAN100_FAUCET_VIP_SPACE,
self.VLAN200_FAUCET_MAC, self.VLAN200_FAUCET_VIP_SPACE,
self.base_config())
def setUp(self):
"""Create a stacking config file."""
self.create_config()
self.setup_valves(self.CONFIG)
self.trigger_stack_ports()
def switch_manager_flood_ports(self, switch_manager):
"""Return list of port numbers that will be flooded to"""
return [port.number for port in switch_manager._stack_flood_ports()] # pylint: disable=protected-access
def route_manager_ofmsgs(self, route_manager, vlan):
"""Return ofmsgs for route stack link flooding"""
faucet_vip = list(vlan.faucet_vips_by_ipv(4))[0].ip
ofmsgs = route_manager._flood_stack_links( # pylint: disable=protected-access
route_manager._gw_resolve_pkt(), vlan, route_manager.multi_out, # pylint: disable=protected-access
vlan.faucet_mac, valve_of.mac.BROADCAST_STR,
faucet_vip, self.DST_ADDRESS)
return ofmsgs
def test_flood_towards_root_from_s1(self):
"""Test intervlan flooding goes towards the root"""
output_ports = [3]
valve = self.valves_manager.valves[1]
ports = self.switch_manager_flood_ports(valve.switch_manager)
self.assertEqual(output_ports, ports, 'InterVLAN flooding does not match expected')
route_manager = valve._route_manager_by_ipv.get(4, None)
vlan = valve.dp.vlans[100]
ofmsgs = self.route_manager_ofmsgs(route_manager, vlan)
self.assertTrue(ValveTestBases.packet_outs_from_flows(ofmsgs))
def test_flood_away_from_root(self):
"""Test intervlan flooding goes away from the root"""
output_ports = [3, 4]
valve = self.valves_manager.valves[2]
ports = self.switch_manager_flood_ports(valve.switch_manager)
self.assertEqual(output_ports, ports, 'InterVLAN flooding does not match expected')
route_manager = valve._route_manager_by_ipv.get(4, None)
vlan = valve.dp.vlans[100]
ofmsgs = self.route_manager_ofmsgs(route_manager, vlan)
self.assertTrue(ValveTestBases.packet_outs_from_flows(ofmsgs))
def test_flood_towards_root_from_s3(self):
"""Test intervlan flooding only goes towards the root (s4 will get the reflection)"""
output_ports = [3]
valve = self.valves_manager.valves[3]
ports = self.switch_manager_flood_ports(valve.switch_manager)
self.assertEqual(output_ports, ports, 'InterVLAN flooding does not match expected')
route_manager = valve._route_manager_by_ipv.get(4, None)
vlan = valve.dp.vlans[100]
ofmsgs = self.route_manager_ofmsgs(route_manager, vlan)
self.assertTrue(ValveTestBases.packet_outs_from_flows(ofmsgs))
def test_flood_towards_root_from_s4(self):
"""Test intervlan flooding goes towards the root (through s3)"""
output_ports = [3]
valve = self.valves_manager.valves[4]
ports = self.switch_manager_flood_ports(valve.switch_manager)
self.assertEqual(output_ports, ports, 'InterVLAN flooding does not match expected')
route_manager = valve._route_manager_by_ipv.get(4, None)
vlan = valve.dp.vlans[100]
ofmsgs = self.route_manager_ofmsgs(route_manager, vlan)
self.assertTrue(ValveTestBases.packet_outs_from_flows(ofmsgs))
class ValveTestTunnel2DP(ValveTestBases.ValveTestNetwork):
"""Test Tunnel ACL implementation"""
SRC_ID = 5
DST_ID = 2
SAME_ID = 4
NONE_ID = 3
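    # Expected tunnel VLAN VIDs for the tunnel ACLs below (assumed to match the VIDs FAUCET
    # allocates for these tunnels with this config); used as the encapsulation VID in validate_tunnel().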
CONFIG = """
acls:
src_acl:
- rule:
dl_type: 0x0800
ip_proto: 1
actions:
output:
tunnel: {dp: s2, port: 1}
dst_acl:
- rule:
dl_type: 0x0800
ip_proto: 1
actions:
output:
tunnel: {dp: s1, port: 1}
same_acl:
- rule:
dl_type: 0x0800
ip_proto: 1
actions:
output:
tunnel: {dp: s1, port: 1}
none_acl:
- rule:
dl_type: 0x0800
ip_proto: 1
actions:
output:
tunnel: {dp: s2, port: 1}
vlans:
vlan100:
vid: 1
dps:
s1:
dp_id: 0x1
hardware: 'GenericTFM'
stack:
priority: 1
interfaces:
1:
name: src_tunnel_host
native_vlan: vlan100
acls_in: [src_acl]
2:
name: same_tunnel_host
native_vlan: vlan100
acls_in: [same_acl]
3:
stack: {dp: s2, port: 3}
4:
stack: {dp: s2, port: 4}
s2:
dp_id: 0x2
hardware: 'GenericTFM'
interfaces:
1:
name: dst_tunnel_host
native_vlan: vlan100
acls_in: [dst_acl]
2:
name: transit_tunnel_host
native_vlan: vlan100
acls_in: [none_acl]
3:
stack: {dp: s1, port: 3}
4:
stack: {dp: s1, port: 4}
"""
def setUp(self):
"""Create a stacking config file."""
self.setup_valves(self.CONFIG)
self.activate_all_ports()
for valve in self.valves_manager.valves.values():
for port in valve.dp.ports.values():
if port.stack:
self.set_stack_port_up(port.number, valve)
def validate_tunnel(self, in_port, in_vid, out_port, out_vid, expected, msg):
bcast_match = {
'in_port': in_port,
'eth_dst': mac.BROADCAST_STR,
'eth_type': 0x0800,
'ip_proto': 1
}
if in_vid:
in_vid = in_vid | ofp.OFPVID_PRESENT
bcast_match['vlan_vid'] = in_vid
if out_vid:
out_vid = out_vid | ofp.OFPVID_PRESENT
table = self.network.tables[self.DP_ID]
if expected:
self.assertTrue(table.is_output(bcast_match, port=out_port, vid=out_vid), msg=msg)
else:
self.assertFalse(table.is_output(bcast_match, port=out_port, vid=out_vid), msg=msg)
def test_update_src_tunnel(self):
"""Test tunnel rules when encapsulating and forwarding to the destination switch"""
valve = self.valves_manager.valves[0x1]
port = valve.dp.ports[3]
# Apply tunnel to ofmsgs on valve
self.apply_ofmsgs(valve.switch_manager.add_tunnel_acls())
        # Should encapsulate and output packet towards tunnel destination s2
self.validate_tunnel(
1, 0, 3, self.SRC_ID, True,
'Did not encapsulate and forward')
# Set the chosen port down to force a recalculation on the tunnel path
self.set_port_down(port.number)
ofmsgs = valve.switch_manager.add_tunnel_acls()
self.assertTrue(ofmsgs, 'No tunnel ofmsgs returned after a topology change')
self.apply_ofmsgs(ofmsgs)
# Should encapsulate and output packet using the new path
self.validate_tunnel(
1, 0, 4, self.SRC_ID, True,
'Did not encapsulate and forward out re-calculated port')
def test_update_same_tunnel(self):
"""Test tunnel rules when outputting to host on the same switch as the source"""
valve = self.valves_manager.valves[0x1]
self.apply_ofmsgs(valve.switch_manager.add_tunnel_acls())
self.validate_tunnel(2, 0, 1, 0, True, 'Did not forward to host on same DP')
def test_update_dst_tunnel(self):
"""Test a tunnel outputting to the correct tunnel destination"""
valve = self.valves_manager.valves[0x1]
port = valve.dp.ports[3]
# Apply tunnel to ofmsgs on valve
self.apply_ofmsgs(valve.switch_manager.add_tunnel_acls())
# Should accept encapsulated packet and output to the destination host
self.validate_tunnel(3, self.DST_ID, 1, 0, True, 'Did not output to host')
# Set the chosen port down to force a recalculation on the tunnel path
self.set_port_down(port.number)
ofmsgs = valve.switch_manager.add_tunnel_acls()
self.assertTrue(ofmsgs, 'No tunnel ofmsgs returned after a topology change')
self.apply_ofmsgs(ofmsgs)
        # Should accept encapsulated packet and output using the new path
self.validate_tunnel(4, self.DST_ID, 1, 0, True, 'Did not output to host')
def test_update_none_tunnel(self):
"""Test tunnel on a switch not using a tunnel ACL"""
valve = self.valves_manager.valves[0x1]
self.apply_ofmsgs(valve.switch_manager.add_tunnel_acls())
# Should drop any packets received from the tunnel
self.validate_tunnel(
5, self.NONE_ID, None, None, False,
'Should not output a packet')
self.validate_tunnel(
6, self.NONE_ID, None, None, False,
'Should not output a packet')
class ValveTestTransitTunnel(ValveTestBases.ValveTestNetwork):
"""Test tunnel ACL implementation"""
TRANSIT_ID = 2
CONFIG = """
acls:
transit_acl:
- rule:
dl_type: 0x0800
ip_proto: 1
actions:
output:
tunnel: {dp: s3, port: 1}
vlans:
vlan100:
vid: 1
dps:
s1:
dp_id: 0x1
hardware: 'GenericTFM'
stack:
priority: 1
interfaces:
3:
stack: {dp: s2, port: 3}
4:
stack: {dp: s2, port: 4}
5:
stack: {dp: s3, port: 5}
6:
stack: {dp: s3, port: 6}
s2:
dp_id: 0x2
hardware: 'GenericTFM'
interfaces:
1:
name: source_host
native_vlan: vlan100
acls_in: [transit_acl]
3:
stack: {dp: s1, port: 3}
4:
stack: {dp: s1, port: 4}
s3:
dp_id: 0x3
hardware: 'GenericTFM'
interfaces:
1:
name: destination_host
native_vlan: vlan100
5:
stack: {dp: s1, port: 5}
6:
stack: {dp: s1, port: 6}
"""
def setUp(self):
"""Create a stacking config file."""
self.setup_valves(self.CONFIG)
self.activate_all_ports()
for valve in self.valves_manager.valves.values():
for port in valve.dp.ports.values():
if port.stack:
self.set_stack_port_up(port.number, valve)
def validate_tunnel(self, in_port, in_vid, out_port, out_vid, expected, msg):
bcast_match = {
'in_port': in_port,
'eth_dst': mac.BROADCAST_STR,
'eth_type': 0x0800,
'ip_proto': 1
}
if in_vid:
in_vid = in_vid | ofp.OFPVID_PRESENT
bcast_match['vlan_vid'] = in_vid
if out_vid:
out_vid = out_vid | ofp.OFPVID_PRESENT
table = self.network.tables[self.DP_ID]
if expected:
self.assertTrue(table.is_output(bcast_match, port=out_port, vid=out_vid), msg=msg)
else:
self.assertFalse(table.is_output(bcast_match, port=out_port, vid=out_vid), msg=msg)
def test_update_transit_tunnel(self):
"""Test a tunnel through a transit switch (forwards to the correct switch)"""
valve = self.valves_manager.valves[0x1]
port1 = valve.dp.ports[3]
port2 = valve.dp.ports[5]
# Apply tunnel to ofmsgs on valve
self.apply_ofmsgs(valve.switch_manager.add_tunnel_acls())
# Should accept packet from stack and output to the next switch
self.validate_tunnel(
3, self.TRANSIT_ID, 5, self.TRANSIT_ID, True,
'Did not output to next switch')
# Set the chosen port down to force a recalculation on the tunnel path
self.set_port_down(port1.number)
# Should accept encapsulated packet and output using the new path
self.validate_tunnel(
4, self.TRANSIT_ID, 5, self.TRANSIT_ID, True,
'Did not output to next switch')
# Set the chosen port to the next switch down to force a path recalculation
self.set_port_down(port2.number)
ofmsgs = valve.switch_manager.add_tunnel_acls()
self.assertTrue(ofmsgs, 'No tunnel ofmsgs returned after a topology change')
self.apply_ofmsgs(ofmsgs)
# Should accept encapsulated packet and output using the new path
self.validate_tunnel(
4, self.TRANSIT_ID, 6, self.TRANSIT_ID, True,
'Did not output to next switch')
class ValveTestMultipleTunnel(ValveTestBases.ValveTestNetwork):
"""Test tunnel ACL implementation with multiple hosts containing tunnel ACL"""
TUNNEL_ID = 2
CONFIG = """
acls:
tunnel_acl:
- rule:
dl_type: 0x0800
ip_proto: 1
actions:
output:
tunnel: {dp: s2, port: 1}
vlans:
vlan100:
vid: 1
dps:
s1:
dp_id: 0x1
hardware: 'GenericTFM'
stack:
priority: 1
interfaces:
1:
native_vlan: vlan100
acls_in: [tunnel_acl]
2:
native_vlan: vlan100
acls_in: [tunnel_acl]
3:
stack: {dp: s2, port: 3}
4:
stack: {dp: s2, port: 4}
s2:
dp_id: 0x2
hardware: 'GenericTFM'
interfaces:
1:
native_vlan: vlan100
3:
stack: {dp: s1, port: 3}
4:
stack: {dp: s1, port: 4}
"""
def setUp(self):
"""Create a stacking config file."""
self.setup_valves(self.CONFIG)
self.activate_all_ports()
for valve in self.valves_manager.valves.values():
for port in valve.dp.ports.values():
if port.stack:
self.set_stack_port_up(port.number, valve)
def validate_tunnel(self, in_port, in_vid, out_port, out_vid, expected, msg):
bcast_match = {
'in_port': in_port,
'eth_dst': mac.BROADCAST_STR,
'eth_type': 0x0800,
'ip_proto': 1
}
if in_vid:
in_vid = in_vid | ofp.OFPVID_PRESENT
bcast_match['vlan_vid'] = in_vid
if out_vid:
out_vid = out_vid | ofp.OFPVID_PRESENT
table = self.network.tables[self.DP_ID]
if expected:
self.assertTrue(table.is_output(bcast_match, port=out_port, vid=out_vid), msg=msg)
else:
self.assertFalse(table.is_output(bcast_match, port=out_port, vid=out_vid), msg=msg)
def test_tunnel_update_multiple_tunnels(self):
"""Test having multiple hosts with the same tunnel"""
valve = self.valves_manager.valves[0x1]
port = valve.dp.ports[3]
# Apply tunnel to ofmsgs on valve
self.apply_ofmsgs(valve.switch_manager.add_tunnel_acls())
        # Should encapsulate and output packet towards tunnel destination s2
self.validate_tunnel(
1, 0, 3, self.TUNNEL_ID, True,
'Did not encapsulate and forward')
self.validate_tunnel(
2, 0, 3, self.TUNNEL_ID, True,
'Did not encapsulate and forward')
# Set the chosen port down to force a recalculation on the tunnel path
self.set_port_down(port.number)
ofmsgs = valve.switch_manager.add_tunnel_acls()
self.assertTrue(ofmsgs, 'No tunnel ofmsgs returned after a topology change')
self.apply_ofmsgs(ofmsgs)
# Should encapsulate and output packet using the new path
self.validate_tunnel(
1, 0, 4, self.TUNNEL_ID, True,
'Did not encapsulate and forward out re-calculated port')
        self.validate_tunnel(
            2, 0, 4, self.TUNNEL_ID, True,
            'Did not encapsulate and forward out re-calculated port')
class ValveTestOrderedTunnel2DP(ValveTestBases.ValveTestNetwork):
"""Test Tunnel ACL implementation"""
SRC_ID = 6
DST_ID = 2
SAME_ID = 4
NONE_ID = 3
CONFIG = """
acls:
src_acl:
- rule:
dl_type: 0x0800
ip_proto: 1
actions:
output:
- tunnel: {dp: s2, port: 1}
- rule:
dl_type: 0x86dd
ip_proto: 56
actions:
output:
- tunnel: {dp: s2, port: 1}
dst_acl:
- rule:
dl_type: 0x0800
ip_proto: 1
actions:
output:
- tunnel: {dp: s1, port: 1}
same_acl:
- rule:
dl_type: 0x0800
ip_proto: 1
actions:
output:
- tunnel: {dp: s1, port: 1}
none_acl:
- rule:
dl_type: 0x0800
ip_proto: 1
actions:
output:
- tunnel: {dp: s2, port: 1}
vlans:
vlan100:
vid: 1
dps:
s1:
dp_id: 0x1
hardware: 'GenericTFM'
stack:
priority: 1
interfaces:
1:
name: src_tunnel_host
native_vlan: vlan100
acls_in: [src_acl]
2:
name: same_tunnel_host
native_vlan: vlan100
acls_in: [same_acl]
3:
stack: {dp: s2, port: 3}
4:
stack: {dp: s2, port: 4}
s2:
dp_id: 0x2
hardware: 'GenericTFM'
interfaces:
1:
name: dst_tunnel_host
native_vlan: vlan100
acls_in: [dst_acl]
2:
name: transit_tunnel_host
native_vlan: vlan100
acls_in: [none_acl]
3:
stack: {dp: s1, port: 3}
4:
stack: {dp: s1, port: 4}
"""
def setUp(self):
"""Create a stacking config file."""
self.setup_valves(self.CONFIG)
self.activate_all_ports()
for valve in self.valves_manager.valves.values():
for port in valve.dp.ports.values():
if port.stack:
self.set_stack_port_up(port.number, valve)
def validate_tunnel(self, in_port, in_vid, out_port, out_vid, expected, msg, eth_type=0x0800, ip_proto=1):
bcast_match = {
'in_port': in_port,
'eth_dst': mac.BROADCAST_STR,
'eth_type': eth_type,
'ip_proto': ip_proto,
}
if in_vid:
in_vid = in_vid | ofp.OFPVID_PRESENT
bcast_match['vlan_vid'] = in_vid
if out_vid:
out_vid = out_vid | ofp.OFPVID_PRESENT
table = self.network.tables[self.DP_ID]
if expected:
self.assertTrue(table.is_output(bcast_match, port=out_port, vid=out_vid), msg=msg)
else:
self.assertFalse(table.is_output(bcast_match, port=out_port, vid=out_vid), msg=msg)
def test_update_src_tunnel(self):
"""Test tunnel rules when encapsulating and forwarding to the destination switch"""
valve = self.valves_manager.valves[0x1]
port = valve.dp.ports[3]
# Apply tunnel to ofmsgs on valve
self.apply_ofmsgs(valve.switch_manager.add_tunnel_acls())
        # Should encapsulate and output packet towards tunnel destination s2
self.validate_tunnel(
1, 0, 3, self.SRC_ID, True,
'Did not encapsulate and forward')
self.validate_tunnel(
1, 0, 3, self.SRC_ID, True,
'Did not encapsulate and forward',
eth_type=0x86dd, ip_proto=56)
# Set the chosen port down to force a recalculation on the tunnel path
self.set_port_down(port.number)
ofmsgs = valve.switch_manager.add_tunnel_acls()
self.assertTrue(ofmsgs, 'No tunnel ofmsgs returned after a topology change')
self.apply_ofmsgs(ofmsgs)
# Should encapsulate and output packet using the new path
self.validate_tunnel(
1, 0, 4, self.SRC_ID, True,
'Did not encapsulate and forward out re-calculated port')
def test_update_same_tunnel(self):
"""Test tunnel rules when outputting to host on the same switch as the source"""
valve = self.valves_manager.valves[0x1]
self.apply_ofmsgs(valve.switch_manager.add_tunnel_acls())
self.validate_tunnel(2, 0, 1, 0, True, 'Did not forward to host on same DP')
def test_update_dst_tunnel(self):
"""Test a tunnel outputting to the correct tunnel destination"""
valve = self.valves_manager.valves[0x1]
port = valve.dp.ports[3]
# Apply tunnel to ofmsgs on valve
self.apply_ofmsgs(valve.switch_manager.add_tunnel_acls())
# Should accept encapsulated packet and output to the destination host
self.validate_tunnel(3, self.DST_ID, 1, 0, True, 'Did not output to host')
# Set the chosen port down to force a recalculation on the tunnel path
self.set_port_down(port.number)
ofmsgs = valve.switch_manager.add_tunnel_acls()
self.assertTrue(ofmsgs, 'No tunnel ofmsgs returned after a topology change')
self.apply_ofmsgs(ofmsgs)
        # Should accept encapsulated packet and output using the new path
self.validate_tunnel(4, self.DST_ID, 1, 0, True, 'Did not output to host')
def test_update_none_tunnel(self):
"""Test tunnel on a switch not using a tunnel ACL"""
valve = self.valves_manager.valves[0x1]
self.apply_ofmsgs(valve.switch_manager.add_tunnel_acls())
# Should drop any packets received from the tunnel
self.validate_tunnel(
5, self.NONE_ID, None, None, False,
'Should not output a packet')
self.validate_tunnel(
6, self.NONE_ID, None, None, False,
'Should not output a packet')
class ValveTestTransitOrderedTunnel(ValveTestBases.ValveTestNetwork):
"""Test tunnel ACL implementation"""
TRANSIT_ID = 2
CONFIG = """
acls:
transit_acl:
- rule:
dl_type: 0x0800
ip_proto: 1
actions:
output:
- tunnel: {dp: s3, port: 1}
vlans:
vlan100:
vid: 1
dps:
s1:
dp_id: 0x1
hardware: 'GenericTFM'
stack:
priority: 1
interfaces:
3:
stack: {dp: s2, port: 3}
4:
stack: {dp: s2, port: 4}
5:
stack: {dp: s3, port: 5}
6:
stack: {dp: s3, port: 6}
s2:
dp_id: 0x2
hardware: 'GenericTFM'
interfaces:
1:
name: source_host
native_vlan: vlan100
acls_in: [transit_acl]
3:
stack: {dp: s1, port: 3}
4:
stack: {dp: s1, port: 4}
s3:
dp_id: 0x3
hardware: 'GenericTFM'
interfaces:
1:
name: destination_host
native_vlan: vlan100
5:
stack: {dp: s1, port: 5}
6:
stack: {dp: s1, port: 6}
"""
def setUp(self):
"""Create a stacking config file."""
self.setup_valves(self.CONFIG)
self.activate_all_ports()
for valve in self.valves_manager.valves.values():
for port in valve.dp.ports.values():
if port.stack:
self.set_stack_port_up(port.number, valve)
def validate_tunnel(self, in_port, in_vid, out_port, out_vid, expected, msg):
bcast_match = {
'in_port': in_port,
'eth_dst': mac.BROADCAST_STR,
'eth_type': 0x0800,
'ip_proto': 1
}
if in_vid:
in_vid = in_vid | ofp.OFPVID_PRESENT
bcast_match['vlan_vid'] = in_vid
if out_vid:
out_vid = out_vid | ofp.OFPVID_PRESENT
table = self.network.tables[self.DP_ID]
if expected:
self.assertTrue(table.is_output(bcast_match, port=out_port, vid=out_vid), msg=msg)
else:
self.assertFalse(table.is_output(bcast_match, port=out_port, vid=out_vid), msg=msg)
def test_update_transit_tunnel(self):
"""Test a tunnel through a transit switch (forwards to the correct switch)"""
valve = self.valves_manager.valves[0x1]
port1 = valve.dp.ports[3]
port2 = valve.dp.ports[5]
# Apply tunnel to ofmsgs on valve
self.apply_ofmsgs(valve.switch_manager.add_tunnel_acls())
# Should accept packet from stack and output to the next switch
self.validate_tunnel(
3, self.TRANSIT_ID, 5, self.TRANSIT_ID, True,
'Did not output to next switch')
# Set the chosen port down to force a recalculation on the tunnel path
self.set_port_down(port1.number)
# Should accept encapsulated packet and output using the new path
self.validate_tunnel(
4, self.TRANSIT_ID, 5, self.TRANSIT_ID, True,
'Did not output to next switch')
# Set the chosen port to the next switch down to force a path recalculation
self.set_port_down(port2.number)
ofmsgs = valve.switch_manager.add_tunnel_acls()
self.assertTrue(ofmsgs, 'No tunnel ofmsgs returned after a topology change')
self.apply_ofmsgs(ofmsgs)
# Should accept encapsulated packet and output using the new path
self.validate_tunnel(
4, self.TRANSIT_ID, 6, self.TRANSIT_ID, True,
'Did not output to next switch')
class ValveTestMultipleOrderedTunnel(ValveTestBases.ValveTestNetwork):
"""Test tunnel ACL implementation with multiple hosts containing tunnel ACL"""
TUNNEL_ID = 2
CONFIG = """
acls:
tunnel_acl:
- rule:
dl_type: 0x0800
ip_proto: 1
actions:
output:
- tunnel: {dp: s2, port: 1}
vlans:
vlan100:
vid: 1
dps:
s1:
dp_id: 0x1
hardware: 'GenericTFM'
stack:
priority: 1
interfaces:
1:
native_vlan: vlan100
acls_in: [tunnel_acl]
2:
native_vlan: vlan100
acls_in: [tunnel_acl]
3:
stack: {dp: s2, port: 3}
4:
stack: {dp: s2, port: 4}
s2:
dp_id: 0x2
hardware: 'GenericTFM'
interfaces:
1:
native_vlan: vlan100
3:
stack: {dp: s1, port: 3}
4:
stack: {dp: s1, port: 4}
"""
def setUp(self):
"""Create a stacking config file."""
self.setup_valves(self.CONFIG)
self.activate_all_ports()
for valve in self.valves_manager.valves.values():
for port in valve.dp.ports.values():
if port.stack:
self.set_stack_port_up(port.number, valve)
def validate_tunnel(self, in_port, in_vid, out_port, out_vid, expected, msg):
bcast_match = {
'in_port': in_port,
'eth_dst': mac.BROADCAST_STR,
'eth_type': 0x0800,
'ip_proto': 1
}
if in_vid:
in_vid = in_vid | ofp.OFPVID_PRESENT
bcast_match['vlan_vid'] = in_vid
if out_vid:
out_vid = out_vid | ofp.OFPVID_PRESENT
table = self.network.tables[self.DP_ID]
if expected:
self.assertTrue(table.is_output(bcast_match, port=out_port, vid=out_vid), msg=msg)
else:
self.assertFalse(table.is_output(bcast_match, port=out_port, vid=out_vid), msg=msg)
def test_tunnel_update_multiple_tunnels(self):
"""Test having multiple hosts with the same tunnel"""
valve = self.valves_manager.valves[0x1]
port = valve.dp.ports[3]
# Apply tunnel to ofmsgs on valve
self.apply_ofmsgs(valve.switch_manager.add_tunnel_acls())
        # Should encapsulate and output packet towards tunnel destination s2
self.validate_tunnel(
1, 0, 3, self.TUNNEL_ID, True,
'Did not encapsulate and forward')
self.validate_tunnel(
2, 0, 3, self.TUNNEL_ID, True,
'Did not encapsulate and forward')
# Set the chosen port down to force a recalculation on the tunnel path
self.set_port_down(port.number)
ofmsgs = valve.switch_manager.add_tunnel_acls()
self.assertTrue(ofmsgs, 'No tunnel ofmsgs returned after a topology change')
self.apply_ofmsgs(ofmsgs)
# Should encapsulate and output packet using the new path
self.validate_tunnel(
1, 0, 4, self.TUNNEL_ID, True,
'Did not encapsulate and forward out re-calculated port')
        self.validate_tunnel(
            2, 0, 4, self.TUNNEL_ID, True,
            'Did not encapsulate and forward out re-calculated port')
class ValveTwoDpRoot(ValveTestBases.ValveTestNetwork):
"""Test simple stack topology from root."""
CONFIG = """
dps:
s1:
dp_id: 0x1
hardware: 'GenericTFM'
stack:
priority: 1
interfaces:
1:
native_vlan: 100
2:
stack:
dp: s2
port: 2
s2:
dp_id: 0x2
hardware: 'GenericTFM'
interfaces:
1:
native_vlan: 100
2:
stack:
dp: s1
port: 2
"""
CONFIG3 = """
dps:
s1:
dp_id: 0x1
hardware: 'GenericTFM'
stack:
priority: 1
interfaces:
1:
native_vlan: 100
2:
stack:
dp: s2
port: 2
3:
tagged_vlans: [100]
s2:
dp_id: 0x2
hardware: 'GenericTFM'
interfaces:
1:
native_vlan: 100
2:
stack:
dp: s1
port: 2
"""
def setUp(self):
self.setup_valves(self.CONFIG)
def test_topo(self):
"""Test topology functions."""
dp = self.valves_manager.valves[self.DP_ID].dp
self.assertTrue(dp.is_stack_root())
self.assertFalse(dp.is_stack_edge())
def test_add_remove_port(self):
self.update_and_revert_config(self.CONFIG, self.CONFIG3, 'warm')
class ValveTwoDpRootEdge(ValveTestBases.ValveTestNetwork):
"""Test simple stack topology from edge."""
CONFIG = """
dps:
s1:
dp_id: 0x1
hardware: 'GenericTFM'
interfaces:
1:
native_vlan: 100
2:
stack:
dp: s2
port: 2
s2:
dp_id: 0x2
hardware: 'GenericTFM'
stack:
priority: 1
interfaces:
1:
native_vlan: 100
2:
stack:
dp: s1
port: 2
"""
CONFIG3 = """
dps:
s1:
dp_id: 0x1
hardware: 'GenericTFM'
interfaces:
1:
native_vlan: 100
2:
stack:
dp: s2
port: 2
3:
tagged_vlans: [100]
s2:
dp_id: 0x2
hardware: 'GenericTFM'
stack:
priority: 1
interfaces:
1:
native_vlan: 100
2:
stack:
dp: s1
port: 2
"""
def setUp(self):
self.setup_valves(self.CONFIG)
def test_topo(self):
"""Test topology functions."""
dp_obj = self.valves_manager.valves[self.DP_ID].dp
self.assertFalse(dp_obj.is_stack_root())
self.assertTrue(dp_obj.is_stack_edge())
def test_add_remove_port(self):
self.update_and_revert_config(self.CONFIG, self.CONFIG3, 'warm')
class GroupDeleteACLTestCase(ValveTestBases.ValveTestNetwork):
"""Test that a group ACL creates a groupdel for the group_id"""
CONFIG = """
acls:
group-acl:
- rule:
dl_dst: "0e:00:00:00:02:02"
actions:
output:
failover:
group_id: 1001
ports: [2, 3]
vlans:
vlan100:
vid: 100
dps:
s1:
dp_id: 0x1
hardware: 'GenericTFM'
interfaces:
1:
native_vlan: vlan100
acls_in: [group-acl]
2:
native_vlan: vlan100
3:
native_vlan: vlan100
"""
def setUp(self):
self.setup_valves(self.CONFIG)
def check_groupmods_exist(self, ofmsgs, groupdel_exists=True):
"""Test that the ACL groupmods exist when expected"""
groupdel = None
groupmod = None
for ofmsg in ofmsgs:
if valve_of.is_groupdel(ofmsg) and not valve_of.is_global_groupdel(ofmsg):
groupdel = ofmsg
elif valve_of.is_groupmod(ofmsg):
groupmod = ofmsg
self.assertIsNotNone(groupmod)
if groupdel_exists:
self.assertIsNotNone(groupdel)
if groupdel is not None:
                self.assertEqual(groupdel.group_id, 1001)
else:
self.assertIsNone(groupdel)
def test_groupdel_exists(self):
"""Test valve_flowreorder doesn't remove groupmods unless expected"""
valve = self.valves_manager.valves[0x1]
port = valve.dp.ports[1]
ofmsgs = valve.acl_manager.add_port(port)
self.check_groupmods_exist(valve_of.valve_flowreorder(ofmsgs))
global_flowmod = valve_of.flowmod(
0, ofp.OFPFC_DELETE, ofp.OFPTT_ALL,
0, ofp.OFPP_CONTROLLER, ofp.OFPP_CONTROLLER,
valve_of.match_from_dict({}), (), 0, 0, 0)
self.check_groupmods_exist(
valve_of.valve_flowreorder(ofmsgs + [global_flowmod]))
global_metermod = valve_of.meterdel()
self.check_groupmods_exist(
valve_of.valve_flowreorder(ofmsgs + [global_flowmod, global_metermod]))
global_groupmod = valve_of.groupdel()
self.check_groupmods_exist(
valve_of.valve_flowreorder(
ofmsgs + [global_flowmod, global_metermod, global_groupmod]), False)
class ValveWarmStartStackTest(ValveTestBases.ValveTestNetwork):
"""Test warm starting stack ports"""
CONFIG = """
vlans:
vlan100:
vid: 100
vlan200:
vid: 200
dps:
s1:
dp_id: 1
hardware: 'GenericTFM'
stack: {priority: 1}
interfaces:
1:
stack: {dp: s2, port: 1}
2:
name: host1
native_vlan: vlan100
3:
name: host2
native_vlan: vlan200
4:
name: host3
native_vlan: vlan200
s2:
dp_id: 2
hardware: 'GenericTFM'
interfaces:
1:
stack: {dp: s1, port: 1}
2:
stack: {dp: s3, port: 1}
4:
name: host4
native_vlan: vlan100
5:
name: host5
native_vlan: vlan200
s3:
dp_id: 3
hardware: 'GenericTFM'
interfaces:
1:
stack: {dp: s2, port: 2}
3:
name: host6
native_vlan: vlan100
4:
name: host7
native_vlan: vlan200
"""
NEW_PORT_CONFIG = """
vlans:
vlan100:
vid: 100
vlan200:
vid: 200
dps:
s1:
dp_id: 1
hardware: 'GenericTFM'
stack: {priority: 1}
interfaces:
1:
stack: {dp: s2, port: 1}
2:
name: host1
native_vlan: vlan100
3:
name: host2
native_vlan: vlan200
4:
name: host3
native_vlan: vlan200
s2:
dp_id: 2
hardware: 'GenericTFM'
interfaces:
1:
stack: {dp: s1, port: 1}
2:
stack: {dp: s3, port: 1}
3:
stack: {dp: s3, port: 2}
4:
name: host4
native_vlan: vlan100
5:
name: host5
native_vlan: vlan200
s3:
dp_id: 3
hardware: 'GenericTFM'
interfaces:
1:
stack: {dp: s2, port: 2}
2:
stack: {dp: s2, port: 3}
3:
name: host6
native_vlan: vlan100
4:
name: host7
native_vlan: vlan200
"""
NEW_VLAN_CONFIG = """
vlans:
vlan100:
vid: 100
vlan200:
vid: 200
dps:
s1:
dp_id: 1
hardware: 'GenericTFM'
stack: {priority: 1}
interfaces:
1:
stack: {dp: s2, port: 1}
2:
name: host1
native_vlan: vlan100
3:
name: host2
native_vlan: vlan100
4:
name: host3
native_vlan: vlan200
s2:
dp_id: 2
hardware: 'GenericTFM'
interfaces:
1:
stack: {dp: s1, port: 1}
2:
stack: {dp: s3, port: 1}
4:
name: host4
native_vlan: vlan100
5:
name: host5
native_vlan: vlan200
s3:
dp_id: 3
hardware: 'GenericTFM'
interfaces:
1:
stack: {dp: s2, port: 2}
3:
name: host6
native_vlan: vlan100
4:
name: host7
native_vlan: vlan200
"""
def setUp(self):
"""Setup network and start stack ports"""
self.setup_valves(self.CONFIG)
def test_reload_topology_change(self):
"""Test reload with topology change forces stack ports down"""
self.update_and_revert_config(
self.CONFIG, self.NEW_PORT_CONFIG, 'warm')
with open(self.config_file, 'w') as config_file:
config_file.write(self.NEW_PORT_CONFIG)
new_dps = self.valves_manager.parse_configs(self.config_file)
for new_dp in new_dps:
valve = self.valves_manager.valves[new_dp.dp_id]
changes = valve.dp.get_config_changes(valve.logger, new_dp)
changed_ports, all_ports_changed = changes[1], changes[6]
for port in valve.dp.stack_ports:
if not all_ports_changed:
self.assertIn(
port.number, changed_ports,
'Stack port not detected as changed on topology change')
def test_reload_vlan_change(self):
"""Test reload with topology change stack ports stay up"""
self.update_and_revert_config(
self.CONFIG, self.NEW_VLAN_CONFIG, 'warm')
with open(self.config_file, 'w') as config_file:
config_file.write(self.NEW_VLAN_CONFIG)
new_dps = self.valves_manager.parse_configs(self.config_file)
for new_dp in new_dps:
valve = self.valves_manager.valves[new_dp.dp_id]
changed_ports = valve.dp.get_config_changes(valve.logger, new_dp)[1]
for port in valve.dp.stack_ports:
self.assertNotIn(
port.number, changed_ports,
'Stack port detected as changed on non-topology change')
if __name__ == "__main__":
unittest.main() # pytype: disable=module-attr
|
py | b411e542ede6add18a03b22c72d34fc995e095d4 | import logging
from pingbacks.strategies import strategy_functions
PREFIXES = '''
@prefix dct: <http://purl.org/dc/terms/> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix xml: <http://www.w3.org/XML/1998/namespace> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix pns-data: <http://promsns.org/eg/entity-server/id/dataset/> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix dcat: <http://www.w3.org/ns/dcat#> .
@prefix prov: <http://www.w3.org/ns/prov#> .
'''
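# PREFIXES is prepended to each Turtle snippet below before it is parsed into a graph
# (via strategy_functions.get_graph), so the prefixed names in the test metadata resolve.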
# data from http://promsns.org/eg/entity-server/id/dataset/
DATA = [
{
'entity_uri': 'http://promsns.org/eg/entity-server/id/dataset/001',
'rdf_metadata': '''
pns-data:001 a dcat:Dataset, prov:Entity ;
dct:description "No pingback or provenance properties"^^xsd:string ;
dct:isPartOf <http://promsns.org/eg/entity-server/id/dataset> ;
dct:title "Dataset 001"^^xsd:string
.
'''
},
{
'entity_uri': 'http://promsns.org/eg/entity-server/id/dataset/002',
'rdf_metadata': '''
pns-data:002 a dcat:Dataset, prov:Entity ;
dct:description "has_provenance property only"^^xsd:string ;
dct:isPartOf <http://promsns.org/eg/entity-server/id/dataset> ;
dct:title "Dataset 002"^^xsd:string ;
dct:has_provenance <http://promsns.org/eg/entity-server/id/bundle/x>
.
'''
},
{
'entity_uri': 'http://promsns.org/eg/entity-server/id/dataset/003',
'rdf_metadata': '''
pns-data:003 a dcat:Dataset, prov:Entity ;
dct:description "has_query_service property only"^^xsd:string ;
dct:isPartOf <http://promsns.org/eg/entity-server/id/dataset> ;
dct:title "Dataset 003"^^xsd:string ;
prov:has_query_service <http://promsns.org/eg/entity-server/api/provenance-service>
.
'''
},
{
'entity_uri': 'http://promsns.org/eg/entity-server/id/dataset/004',
'rdf_metadata': '''
pns-data:004 a dcat:Dataset, prov:Entity ;
dct:description "pingback property only"^^xsd:string ;
dct:isPartOf <http://promsns.org/eg/entity-server/id/dataset> ;
dct:title "Dataset 004"^^xsd:string ;
prov:pingback <http://example.com/id/bundle/y>
.
'''
},
{
'entity_uri': 'http://promsns.org/eg/entity-server/id/dataset/005',
'rdf_metadata': '''
pns-data:005 a dcat:Dataset, prov:Entity ;
dct:description "has_provenance, has_query_service & pingback properties"^^xsd:string ;
dct:isPartOf <http://promsns.org/eg/entity-server/id/dataset> ;
dct:title "Dataset 005"^^xsd:string ;
prov:has_provenance <http://example.com/id/bundle/y> ;
prov:has_query_service <http://promsns.org/eg/entity-server/api/provenance-service> ;
prov:pingback <http://promsns.org/eg/entity-server/api/pingback-service/dataset/005> ,
<http://promsns.org/eg/entity-server/api/pingback-service2/dataset/005>
.
'''
},
{
'entity_uri': 'http://promsns.org/eg/entity-server/id/dataset/006',
'rdf_metadata': '''
<http://example.com/metadata/1> a dcat:CatalogRecord, prov:Entity ;
prov:pingback <http://promsns.org/eg/entity-server/api/pingback-service/dataset/006> ;
foaf:primaryTopic pns-data:006
.
pns-data:006 a dcat:Dataset, prov:Entity ;
dct:description "Has a dcat:CatalogRecord with a prov:pingback property"^^xsd:string ;
dct:title "Dataset 006"^^xsd:string
.
'''
},
{
'entity_uri': 'http://promsns.org/eg/entity-server/id/dataset/007',
'rdf_metadata': '''
<http://example.com/metadata/1> a dcat:CatalogRecord, prov:Entity ;
prov:has_query_service <http://promsns.org/eg/entity-server/api/provenance-service> ;
foaf:primaryTopic pns-data:007
.
pns-data:007 a dcat:Dataset, prov:Entity ;
dct:description "Has a dcat:CatalogRecord with a prov:pingback property"^^xsd:string ;
dct:title "Dataset 006"^^xsd:string
.
'''
}
]
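# Datasets 001-007 cover the combinations exercised by the lookup tests below: no provenance or pingback
# properties (001), has_provenance only (002), has_query_service only (003), pingback only (004), all
# three (005), and pingback / query-service attached to a dcat:CatalogRecord rather than to the dataset
# itself (006 and 007).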
def test_is_dereferencable():
# test a 200 response - HTML
r = strategy_functions.is_dereferencable('http://promsns.org')
a = r[0] and 'text/html' in r[2]['Content-Type']
# test a 200 response - RDF
r = strategy_functions.is_dereferencable('http://promsns.org/eg/entity-server/id/dataset/001')
b = r[0] and 'text/turtle' in r[2]['Content-Type']
# test a 404 response
r = strategy_functions.is_dereferencable('http://promsns.org/eg/entity-server/id/dataset/999')
c = not r[0]
    # test a non-dereferenceable response
r = strategy_functions.is_dereferencable('http://broken_link.com/resource/1')
d = not r[0]
return a and b and c and d
def test_has_valid_rdf_meatadata_true():
metadata = PREFIXES + '''
pns-data:002 a dcat:Dataset, prov:Entity ;
dct:description "has_provenance property only"^^xsd:string ;
dct:isPartOf <http://promsns.org/eg/entity-server/id/dataset> ;
dct:title "Dataset 002"^^xsd:string ;
prov:has_provenance <http://promsns.org/eg/entity-server/id/bundle/x>
.
'''
headers = 'text/turtle'
return strategy_functions.has_valid_rdf_meatadata(metadata, headers)[0]
def test_has_valid_rdf_meatadata_false():
# missing a final '.' after last line
metadata = PREFIXES + '''
pns-data:002 a dcat:Dataset, prov:Entity ;
dct:description "has_provenance property only"^^xsd:string ;
dct:isPartOf <http://promsns.org/eg/entity-server/id/dataset> ;
dct:title "Dataset 002"^^xsd:string ;
prov:has_provenance <http://promsns.org/eg/entity-server/id/bundle/x>
'''
headers = 'text/turtle'
return not strategy_functions.has_valid_rdf_meatadata(metadata, headers)[0]
def test_get_pingback_endpoints_via_given():
entity_uri = 'http://example.com/resource/1'
ttl = PREFIXES + '''
<''' + entity_uri + '''> a dcat:Dataset, prov:Entity ;
dct:description "has_provenance property only"^^xsd:string ;
dct:title "Dataset 002"^^xsd:string ;
prov:pingback <http://example.com/resource/1/pingback> ,
<http://example.com/resource/1/pingback/2>
.
'''
    # list.sort() sorts in place and returns None, so sort after assignment (as the later tests do)
    expected_results = ['http://example.com/resource/1/pingback', 'http://example.com/resource/1/pingback/2']
    expected_results.sort()
    actual_results = strategy_functions.get_pingback_endpoints_via_given(strategy_functions.get_graph(ttl), entity_uri)
    actual_results.sort()
ttl2 = PREFIXES + '''
<''' + entity_uri + '''> a dcat:Dataset, prov:Entity ;
dct:description "has_provenance property only"^^xsd:string ;
dct:title "Dataset 002"^^xsd:string
.
'''
expected_results2 = []
actual_results2 = strategy_functions.get_pingback_endpoints_via_given(strategy_functions.get_graph(ttl2), entity_uri) # cannot sort []
return actual_results == expected_results and actual_results2 == expected_results2
def test_get_has_query_service_endpoints_via_given():
entity_uri = 'http://example.com/resource/1'
ttl = PREFIXES + '''
<''' + entity_uri + '''> a dcat:Dataset, prov:Entity ;
dct:description "has_provenance property only"^^xsd:string ;
dct:title "Dataset 002"^^xsd:string ;
prov:has_query_service <http://example.com/resource/1/pingback> ,
<http://example.com/resource/1/pingback/2>
.
'''
expected_results = ['http://example.com/resource/1/pingback', 'http://example.com/resource/1/pingback/2']
expected_results.sort()
actual_results = strategy_functions.get_has_query_service_endpoints_via_given(strategy_functions.get_graph(ttl), entity_uri)
actual_results.sort()
ttl2 = PREFIXES + '''
<''' + entity_uri + '''> a dcat:Dataset, prov:Entity ;
dct:description "has_provenance property only"^^xsd:string ;
dct:title "Dataset 002"^^xsd:string
.
'''
expected_results2 = []
actual_results2 = strategy_functions.get_has_query_service_endpoints_via_given(strategy_functions.get_graph(ttl2), entity_uri) # cannot sort []
return actual_results == expected_results and actual_results2 == expected_results2
def test_get_pingback_endpoints_via_lookup():
expected_results = [
{
'entity_uri': 'http://promsns.org/eg/entity-server/id/dataset/004',
'enpoints': ['http://example.com/id/bundle/y']
},
{
'entity_uri': 'http://promsns.org/eg/entity-server/id/dataset/005',
'enpoints': [
'http://promsns.org/eg/entity-server/api/pingback-service/dataset/005',
'http://promsns.org/eg/entity-server/api/pingback-service2/dataset/005'
]
},
{
'entity_uri': 'http://promsns.org/eg/entity-server/id/dataset/006',
'enpoints': ['http://promsns.org/eg/entity-server/api/pingback-service/dataset/006']
}
]
actual_results = []
for datum in DATA:
r = strategy_functions.get_pingback_endpoints_via_lookup(strategy_functions.get_graph(PREFIXES + datum['rdf_metadata']), datum['entity_uri'])
if len(r) > 0:
r.sort()
actual_results.append({
'entity_uri': datum['entity_uri'],
'enpoints': r
})
return actual_results == expected_results
def test_get_has_provenance_service_endpoints_via_lookup():
expected_results = [
{
'entity_uri': 'http://promsns.org/eg/entity-server/id/dataset/003',
'enpoints': ['http://promsns.org/eg/entity-server/api/provenance-service']
},
{
'entity_uri': 'http://promsns.org/eg/entity-server/id/dataset/005',
'enpoints': ['http://promsns.org/eg/entity-server/api/provenance-service']
},
{
'entity_uri': 'http://promsns.org/eg/entity-server/id/dataset/007',
'enpoints': ['http://promsns.org/eg/entity-server/api/provenance-service']
}
]
actual_results = []
for datum in DATA:
r = strategy_functions.get_has_query_service_endpoints_via_lookup(strategy_functions.get_graph(PREFIXES + datum['rdf_metadata']), datum['entity_uri'])
if len(r) > 0:
r.sort()
actual_results.append({
'entity_uri': datum['entity_uri'],
'enpoints': r
})
return actual_results == expected_results
def test_send_pingback():
    # test against the pingback service on the Entity Server
    # (remote URI kept for reference; the local instance below is the one actually used)
    # pingback_test_uri = 'http://promsns.org/eg/entity-server/api/pingback-service/dataset/001'
    pingback_test_uri = 'http://localhost:8010/api/pingback-service/dataset/001'
fake_provenance_uris = [
'# this is a comment',
'http://example.com/a/b/c',
'http://example.com/d/e/f',
'http://example.com/g/h/i',
'http://example.com/j/k/l',
'http://example.com/m/n/o',
]
r1 = strategy_functions.send(pingback_test_uri, fake_provenance_uris)[0]
# test a broken URI
r2 = not strategy_functions.send(pingback_test_uri, 'not_a_uri')[0]
# test empty messages
r3 = not strategy_functions.send(pingback_test_uri, [])[0]
r4 = not strategy_functions.send(pingback_test_uri, '')[0]
# test further_links - pass
further_links_example = [
{
'resource': 'http://example.com/g/h/i',
'rel': 'has_provenance',
'anchor': 'http://example.com/resource'
},
{
'resource': 'http://example.com/j/k/l',
'rel': 'has_query_service',
'anchor': 'http://example.com/endpoint1'
},
{
'resource': 'http://example.com/m/n/o',
'rel': 'has_query_service',
'anchor': 'http://example.com/endpoint2'
}
]
r5 = strategy_functions.send(pingback_test_uri, fake_provenance_uris, further_links_example)[0]
'''
# test further_links - fail, Exception raised
further_links_example = [
{
'resource': 'http://example.com/g/h/i/x',
'rel': 'has_provenance',
'anchor': 'http://example.com/resource'
}
]
    r6 = strategy_functions.send(pingback_test_uri, fake_provenance_uris, further_links_example)[0]
'''
'''
# test further_links - fail, Exception raised
further_links_example = [
{
'resource': 'http://example.com/g/h/i',
'rel': 'has_provenance',
'anchor': 'www.example.com/resource'
}
]
    r7 = strategy_functions.send(pingback_test_uri, fake_provenance_uris, further_links_example)[0]
'''
return r1 and r2 and r3 and r4 and r5
def test_send_bundle():
pass
if __name__ == "__main__":
logging.basicConfig()
print((test_is_dereferencable()))
print((test_has_valid_rdf_meatadata_true()))
print((test_has_valid_rdf_meatadata_false()))
print((test_get_pingback_endpoints_via_given()))
print((test_get_pingback_endpoints_via_lookup()))
print((test_get_has_query_service_endpoints_via_given()))
print((test_get_has_provenance_service_endpoints_via_lookup()))
print((test_send_pingback())) |
py | b411e574ab222bd18283b5c9461ccb7240373f10 | from pychonet.echonetapiclient import ECHONETAPIClient
from .EchonetInstance import EchonetInstance
from .HomeAirConditioner import HomeAirConditioner
from .HomeSolarPower import HomeSolarPower
from .ElectricVehicleCharger import ElectricVehicleCharger
from .StorageBattery import StorageBattery
from .TemperatureSensor import TemperatureSensor
from .ElectricBlind import ElectricBlind
from .GeneralLighting import GeneralLighting
from pychonet.lib.eojx import EOJX_CLASS
def Factory(host, server, eojgc, eojcc, eojci=0x01):
    """Factory Method"""
    instance = EOJX_CLASS[eojgc][eojcc]
# TODO - probably a much cleaner way of doing this.
instances = {
'Home air conditioner': HomeAirConditioner,
'Home solar power generation': HomeSolarPower,
'Electric vehicle charger/discharger': ElectricVehicleCharger,
'Temperature sensor': TemperatureSensor,
'Storage Battery': StorageBattery,
'Electrically operated blind/shade': ElectricBlind,
'General lighting': GeneralLighting
}
instance_object = instances.get(instance, None)
if instance_object is not None:
return instance_object(host, server, eojci)
return EchonetInstance(host, eojgc, eojcc, eojci, server)
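# Example usage (a hedged sketch; the host address, client setup and ECHONET
# group/class codes below are illustrative assumptions, not values defined here):
#
#     api_client = ECHONETAPIClient(...)             # hypothetical, pre-configured server object
#     device = Factory('192.168.1.10', api_client, 0x01, 0x30)
#     # A known class code returns the matching wrapper (e.g. HomeAirConditioner);
#     # unknown codes fall back to the generic EchonetInstance.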
|
py | b411e7dcca0f792d0ce4d442386ed29f590783eb | import logging
import os
import re
import traceback
from io import StringIO
import winutils
from pywintypes import com_error
import cddagl
from cddagl.i18n import proxy_gettext as _
from cddagl.sql.functions import get_config_value, config_true
version = cddagl.__version__
logger = logging.getLogger('cddagl')
def log_exception(extype, value, tb):
tb_io = StringIO()
traceback.print_tb(tb, file=tb_io)
logger.critical(_('Global error:\nLauncher version: {version}\nType: '
'{extype}\nValue: {value}\nTraceback:\n{traceback}').format(
version=cddagl.__version__, extype=str(extype), value=str(value),
traceback=tb_io.getvalue()))
def ensure_slash(path):
"""Return path making sure it has a trailing slash at the end."""
return os.path.join(path, '')
def unique(seq):
"""Return unique entries in a unordered sequence while original order."""
seen = set()
for x in seq:
if x not in seen:
seen.add(x)
yield x
def clean_qt_path(path):
return path.replace('/', '\\')
def safe_filename(filename):
keepcharacters = (' ', '.', '_', '-')
return ''.join(c for c in filename if c.isalnum() or c in keepcharacters
).strip()
def tryint(s):
try:
return int(s)
except:
return s
def alphanum_key(s):
""" Turn a string into a list of string and number chunks.
"z23a" -> ["z", 23, "a"]
"""
return arstrip([tryint(c) for c in re.split('([0-9]+)', s)])
def arstrip(value):
while len(value) > 1 and value[-1:] == ['']:
value = value[:-1]
return value
def is_64_windows():
return 'PROGRAMFILES(X86)' in os.environ
def bitness():
if is_64_windows():
return _('64-bit')
else:
return _('32-bit')
def sizeof_fmt(num, suffix=None):
if suffix is None:
suffix = _('B')
for unit in ['', _('Ki'), _('Mi'), _('Gi'), _('Ti'), _('Pi'), _('Ei'),
_('Zi')]:
if abs(num) < 1024.0:
return "%3.1f %s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f %s%s" % (num, _('Yi'), suffix)
def delete_path(path):
    ''' Move a directory or file to the recycle bin (or permanently delete it,
    depending on the settings used) using the built-in Windows file
    operations dialog.
    '''
# Make sure we have an absolute path first
if not os.path.isabs(path):
path = os.path.abspath(path)
shellcon = winutils.shellcon
permanently_delete_files = config_true(
get_config_value('permanently_delete_files', 'False'))
if permanently_delete_files:
flags = 0
else:
flags = shellcon.FOF_ALLOWUNDO
flags = (flags |
shellcon.FOF_SILENT |
shellcon.FOF_NOCONFIRMATION |
shellcon.FOF_WANTNUKEWARNING
)
try:
return winutils.delete(path, flags)
except com_error:
return False
def move_path(srcpath, dstpath):
    ''' Move srcpath to dstpath using the built-in Windows file
    operations dialog.
    '''
# Make sure we have absolute paths first
if not os.path.isabs(srcpath):
srcpath = os.path.abspath(srcpath)
if not os.path.isabs(dstpath):
dstpath = os.path.abspath(dstpath)
shellcon = winutils.shellcon
flags = (
shellcon.FOF_ALLOWUNDO |
shellcon.FOF_SILENT |
shellcon.FOF_NOCONFIRMMKDIR |
shellcon.FOF_NOCONFIRMATION |
shellcon.FOF_WANTNUKEWARNING
)
try:
return winutils.move(srcpath, dstpath, flags)
except com_error:
return False
|
py | b411e93d93fb51406618ee399294863c3db59147 | # Copyright (C) 2017-2020 Trent Houliston <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from ..callbacks import ClassificationImages, SeekerImages
from .dataset import Dataset
from .merge_configuration import merge_configuration
def ImageCallback(config, output_path):
validation_config = merge_configuration(config, config["dataset"].get("config", {}))
if config["label"]["type"] == "Classification":
n_images = config["training"]["validation"]["progress_images"]
classes = config["label"]["config"]["classes"]
return ClassificationImages(
output_path=output_path,
dataset=Dataset(config, "validation", batch_size=n_images).take(1),
# Draw using the first colour in the list for each class
colours=[c["colours"][0] for c in classes],
)
elif config["label"]["type"] == "Seeker":
n_images = config["training"]["validation"]["progress_images"]
return SeekerImages(
output_path=output_path,
dataset=Dataset(config, "validation", batch_size=n_images).take(1),
model=validation_config["projection"]["config"]["mesh"]["model"],
geometry=validation_config["projection"]["config"]["geometry"]["shape"],
radius=validation_config["projection"]["config"]["geometry"]["radius"],
scale=validation_config["label"]["config"]["scale"],
)
else:
raise RuntimeError("Cannot create images callback, {} is not a supported type".format(config["label"]["type"]))
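# Example (hedged): a minimal sketch of the configuration keys this factory
# actually reads on the "Classification" branch; real training configs carry
# many more fields and the values below are illustrative only.
#
#     config = {
#         "label": {
#             "type": "Classification",
#             "config": {"classes": [{"name": "ball", "colours": [[255, 0, 0]]}]},
#         },
#         "training": {"validation": {"progress_images": 4}},
#         "dataset": {},
#     }
#     callback = ImageCallback(config, output_path="./progress")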
|
py | b411e971620242deab99a9c43adc822132e6f87d | from django.contrib import admin
from .models import PriceOracle, PriceOracleTicker, Token
from .price_oracles import CannotGetTokenPriceFromApi
@admin.register(PriceOracle)
class PriceOracleAdmin(admin.ModelAdmin):
list_display = ('name', 'configuration')
ordering = ('name',)
@admin.register(PriceOracleTicker)
class PriceOracleTickerAdmin(admin.ModelAdmin):
list_display = ('token_symbol', 'price_oracle_name', 'ticker', 'inverse', 'price')
list_filter = (('token', admin.RelatedOnlyFieldListFilter), 'inverse')
list_select_related = ('price_oracle', 'token')
search_fields = ['token__symbol', '=token__address', 'price_oracle__name']
def price_oracle_name(self, obj):
return obj.price_oracle.name
def token_symbol(self, obj):
return obj.token.symbol
@admin.register(Token)
class TokenAdmin(admin.ModelAdmin):
list_display = ('relevance', 'address', 'name', 'symbol', 'decimals', 'fixed_eth_conversion', 'gas')
list_filter = ('gas', 'decimals', 'fixed_eth_conversion')
ordering = ('relevance',)
search_fields = ['symbol', 'address', 'name']
readonly_fields = ('eth_value', 'price_oracle_ticker_pairs')
def eth_value(self, obj: Token):
        if obj is None or obj.decimals is None:  # Add token admin page
return .0
try:
return obj.get_eth_value()
except CannotGetTokenPriceFromApi:
return None
def price_oracle_ticker_pairs(self, obj: Token):
return [(price_oracle_ticker.price_oracle.name, price_oracle_ticker.ticker) for price_oracle_ticker
in obj.price_oracle_tickers.all()]
|
py | b411ea612122da15caed0002a10595341dd3d5d9 | #!/usr/bin/env python
from setuptools import setup, find_packages
from os import path
import codecs
import os
import re
import sys
def read(*parts):
file_path = path.join(path.dirname(__file__), *parts)
return codecs.open(file_path, encoding='utf-8').read()
def find_version(*parts):
version_file = read(*parts)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return str(version_match.group(1))
raise RuntimeError("Unable to find version string.")
setup(
name = 'django_polymorphic',
version = find_version('polymorphic', '__version__.py'),
license = 'BSD',
description = 'Seamless Polymorphic Inheritance for Django Models',
long_description = read('README.rst'),
url = 'https://github.com/chrisglass/django_polymorphic',
author = 'Bert Constantin',
author_email = '[email protected]',
maintainer = 'Christopher Glass',
maintainer_email = '[email protected]',
packages = find_packages(),
package_data = {
'polymorphic': [
'templates/admin/polymorphic/*.html',
],
},
install_requires=['setuptools'],
test_suite='runtests',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
|
py | b411ea93c32303a1c545a555cb1677cc78b85359 | from django.conf import settings
from django.http import HttpResponse
from django.test import TestCase
from django.test.client import Client
from paypal.standard.ipn.models import PayPalIPN
from paypal.standard.ipn.signals import (payment_was_successful,
payment_was_flagged)
IPN_POST_PARAMS = {
"protection_eligibility": "Ineligible",
"last_name": "User",
"txn_id": "51403485VH153354B",
"receiver_email": getattr(settings, "PAYPAL_RECEIVER_EMAIL", ""),
"payment_status": "Completed",
"payment_gross": "10.00",
"tax": "0.00",
"residence_country": "US",
"invoice": "0004",
"payer_status": "verified",
"txn_type": "express_checkout",
"handling_amount": "0.00",
"payment_date": "23:04:06 Feb 02, 2009 PST",
"first_name": "Test",
"item_name": "",
"charset": "windows-1252",
"custom": "website_id=13&user_id=21",
"notify_version": "2.6",
"transaction_subject": "",
"test_ipn": "1",
"item_number": "",
"receiver_id": "258DLEHY2BDK6",
"payer_id": "BN5JZ2V7MLEV4",
"verify_sign": "An5ns1Kso7MWUdW4ErQKJJJ4qi4-AqdZy6dD.sGO3sDhTf1wAbuO2IZ7",
"payment_fee": "0.59",
"mc_fee": "0.59",
"mc_currency": "USD",
"shipping": "0.00",
"payer_email": "[email protected]",
"payment_type": "instant",
"mc_gross": "10.00",
"quantity": "1",
}
class IPNTest(TestCase):
urls = 'paypal.standard.ipn.tests.test_urls'
def setUp(self):
self.old_debug = settings.DEBUG
settings.DEBUG = True
        # Monkey patch over PayPalIPN to make it get a VERIFIED response.
self.old_postback = PayPalIPN._postback
PayPalIPN._postback = lambda self: "VERIFIED"
def tearDown(self):
settings.DEBUG = self.old_debug
PayPalIPN._postback = self.old_postback
def assertGotSignal(self, signal, flagged):
# Check the signal was sent. These get lost if they don't reference self.
self.got_signal = False
self.signal_obj = None
def handle_signal(sender, **kwargs):
self.got_signal = True
self.signal_obj = sender
signal.connect(handle_signal)
response = self.client.post("/ipn/", IPN_POST_PARAMS)
self.assertEqual(response.status_code, 200)
ipns = PayPalIPN.objects.all()
self.assertEqual(len(ipns), 1)
ipn_obj = ipns[0]
self.assertEqual(ipn_obj.flag, flagged)
self.assertTrue(self.got_signal)
self.assertEqual(self.signal_obj, ipn_obj)
def test_correct_ipn(self):
self.assertGotSignal(payment_was_successful, False)
def test_failed_ipn(self):
PayPalIPN._postback = lambda self: "INVALID"
self.assertGotSignal(payment_was_flagged, True)
def assertFlagged(self, updates, flag_info):
params = IPN_POST_PARAMS.copy()
params.update(updates)
response = self.client.post("/ipn/", params)
self.assertEqual(response.status_code, 200)
ipn_obj = PayPalIPN.objects.all()[0]
self.assertEqual(ipn_obj.flag, True)
self.assertEqual(ipn_obj.flag_info, flag_info)
def test_incorrect_receiver_email(self):
update = {"receiver_email": "[email protected]"}
flag_info = "Invalid receiver_email. ([email protected])"
self.assertFlagged(update, flag_info)
def test_invalid_payment_status(self):
update = {"payment_status": "Failed"}
flag_info = "Invalid payment_status. (Failed)"
self.assertFlagged(update, flag_info)
def test_duplicate_txn_id(self):
self.client.post("/ipn/", IPN_POST_PARAMS)
self.client.post("/ipn/", IPN_POST_PARAMS)
self.assertEqual(len(PayPalIPN.objects.all()), 2)
ipn_obj = PayPalIPN.objects.order_by('-created_at')[1]
self.assertEqual(ipn_obj.flag, True)
self.assertEqual(ipn_obj.flag_info, "Duplicate txn_id. (51403485VH153354B)")
|
py | b411eb1fafa9548621209a0065fdd36b0edc5b68 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['Volume']
class Volume(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
azure_file_parameters: Optional[pulumi.Input[pulumi.InputType['VolumeProviderParametersAzureFileArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
provider: Optional[pulumi.Input[Union[str, 'VolumeProvider']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
volume_resource_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
This type describes a volume resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['VolumeProviderParametersAzureFileArgs']] azure_file_parameters: This type describes a volume provided by an Azure Files file share.
:param pulumi.Input[str] description: User readable description of the volume.
:param pulumi.Input[str] location: The geo-location where the resource lives
:param pulumi.Input[Union[str, 'VolumeProvider']] provider: Provider of the volume.
:param pulumi.Input[str] resource_group_name: Azure resource group name
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[str] volume_resource_name: The identity of the volume.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['azure_file_parameters'] = azure_file_parameters
__props__['description'] = description
__props__['location'] = location
if provider is None and not opts.urn:
raise TypeError("Missing required property 'provider'")
__props__['provider'] = provider
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
__props__['volume_resource_name'] = volume_resource_name
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['status'] = None
__props__['status_details'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:servicefabricmesh:Volume"), pulumi.Alias(type_="azure-nextgen:servicefabricmesh/v20180701preview:Volume")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Volume, __self__).__init__(
'azure-nextgen:servicefabricmesh/v20180901preview:Volume',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Volume':
"""
Get an existing Volume resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return Volume(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="azureFileParameters")
def azure_file_parameters(self) -> pulumi.Output[Optional['outputs.VolumeProviderParametersAzureFileResponse']]:
"""
This type describes a volume provided by an Azure Files file share.
"""
return pulumi.get(self, "azure_file_parameters")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
User readable description of the volume.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def provider(self) -> pulumi.Output[str]:
"""
Provider of the volume.
"""
return pulumi.get(self, "provider")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
State of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def status(self) -> pulumi.Output[str]:
"""
Status of the volume.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter(name="statusDetails")
def status_details(self) -> pulumi.Output[str]:
"""
Gives additional information about the current status of the volume.
"""
return pulumi.get(self, "status_details")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
py | b411eb2ad4baa60dbda8e8be3dccbf7c16599b02 | import matlab.engine
import pickle
import numpy as np
import os
'''
This code propagates rays through the PICO-60 geometry. The jar volume is partitioned into 3x3x3 cm boxes,
and files containing all rays passing through each box are saved in the folder './partitions'. The
files can be read using pickle.load, and each contain a dictionary with the ray index as the key and
the start and end points of the ray as values.
This should be run before running generate_event.py to create the necessary files. Note that this
code runs the matlab files FitPICO60Geometry.m and GetRaysAndPixels.m which requires the Matlab Engine
API to be installed.
'''
# If fit parameters are saved, load them. Otherwise, compute them.
new_params = False
try:
with open('fitparams.pkl', 'rb') as f:
allparams, cam1params, cam2params, cam3params, cam4params = pickle.load(f)
print('Read file fitparams.pkl.')
except:
print('Computing fitparams.')
eng = matlab.engine.start_matlab()
new_params = True
allparams, cam1params, cam2params, cam3params, cam4params = eng.FitPICO60Geometry(nargout=5)
with open('fitparams.pkl', 'wb') as f:
pickle.dump([allparams, cam1params, cam2params, cam3params, cam4params], f)
try:
assert not new_params
with open('rays.pkl', 'rb') as f:
scatters, ray_startingpoints, pixels, gen_params = pickle.load(f)
print('Read file rays.pkl.')
except:
print('Computing rays.')
if not 'eng' in locals() and not 'eng' in globals():
eng = matlab.engine.start_matlab()
scatters, ray_startingpoints, pixels, gen_params = eng.GetRaysAndPixels(allparams, [170,108], nargout=4)
with open('rays.pkl', 'wb') as f:
pickle.dump([scatters, ray_startingpoints, pixels, gen_params], f)
if 'eng' in locals() or 'eng' in globals():
eng.quit()
r1, r2, r3, cyl_axis = gen_params[0][1], gen_params[0][3], gen_params[0][5], gen_params[0][6:]
# r1 is main inner cylinder radius
# r2 is minor radius of torus defining the inner surface of the knuckle of the jar
# r3 is the radius of the sphere defining the inner surface of the dome of the jar
s = r3 * (r1 - r2)/(r3 - r2)
z = -r2 * np.sqrt(1 - (s/r3)**2)
d = r3 * z * (1/r3 - 1/r2)
cyl_axis = np.array(cyl_axis)
# These are the dimensions of a box bounding the volume of the detector in which we wish to generate points
box = (30., 30., 96.) # Each dimension should be an integer multiple of 3
# Coordinate of the center of the box
box_center = (0., 0., 38.)
# These should be the same as specified in partition_rays.py
# These need to be tuples of floats, not ints
# Size of the partition boxes in cm
ps = 3
def injar(p, r1, r2, r3, cyl_axis, z, d):
# Returns true if point p is inside the jar, false otherwise
if p[0]**2 + p[1]**2 < r1**2 and p[2] >= 0:
return True
if np.linalg.norm(p - cyl_axis*d) < r3 and np.dot(cyl_axis, p) < z:
return True
if np.dot(cyl_axis, p) < 0 and np.dot(cyl_axis, p) >= z:
if p[0]**2 + p[1]**2 < r3**2 - (np.abs(z) + cyl_axis[2]*d)**2:
return True
if (np.dot(p,p) - (r1 - r2)**2 - r2**2)**2 - 4*(r1 - r2)**2*(r2**2 - p[2]**2) < 0:
return True
return False
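# Example (hedged): a point on the cylinder axis just above the jar's origin
# satisfies the first (cylindrical) test, so injar is expected to return True
# for it with the fitted parameters loaded above.
#
#     injar(np.array([0., 0., 1.]), r1, r2, r3, cyl_axis, z, d)   # expected True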
# Creates a dict of all rays which scattered at least once
print('Creating scatter dictionary.')
scatter_dicts = [{}, {}, {}, {}]
for cam in range(len(scatter_dicts)):
for scatter in scatters:
index = int(scatter[3])-1
if int(pixels[index][0])-1 != cam or index < 0:
continue
if index in scatter_dicts[cam]:
scatter_dicts[cam][index].append(list(scatter[:3]))
else:
scatter_dicts[cam].update( {index : [list(scatter[:3])]} )
for i, starting_point in enumerate(ray_startingpoints):
if i in scatter_dicts[cam]:
scatter_dicts[cam][i].insert(0, list(starting_point))
del scatters
del ray_startingpoints
def generate_points(n, r1, r2, r3, cyl_axis, z, d):
# Returns an n-by-3 array of random points within the detector geometry
rand_point = lambda: [box[0]*np.random.rand() - box[0]/2 + box_center[0],
box[1]*np.random.rand() - box[1]/2 + box_center[1],
box[2]*np.random.rand() - box[2]/2 + box_center[2]]
if n == 1:
p = rand_point()
while not injar(p, r1, r2, r3, cyl_axis, z, d):
p = rand_point()
return np.array(p)
pts = np.zeros([n,3])
for i in range(n):
p = rand_point()
while not injar(p, r1, r2, r3, cyl_axis, z, d):
p = rand_point()
pts[i] = p
return pts
# Dictionaries of the ray segments for each camera that are inside the jar
# The dictionary key is the ray index and the value is the two scatter points on either side of the ray segment
print('Getting rays which intersect the jar.')
rays_in_jar_dict = [{}, {}, {}, {}]
for cam, s_dict in enumerate(scatter_dicts):
for index in s_dict:
scats = s_dict[index]
npts = len(scats)
ray_seg_in_jar_index = 0
for s in range(1,npts):
pt1 = np.array(scats[-s])
pt2 = np.array(scats[-s-1])
if injar(0.5*(pt1 + pt2), r1, r2, r3, cyl_axis, z, d):
ray_seg_in_jar_index = s
break
if ray_seg_in_jar_index != 0:
rays_in_jar_dict[cam].update( {index : [pt2, pt1]} )
del scatter_dicts
def get_intersection(dist1, dist2, p1, p2):
# Helper function for LBintersection
if dist1*dist2 >= 0 or dist1 == dist2:
return None
return p1 - (p2-p1)*dist1/(dist2-dist1)
def in_box(hit, B1, B2, axis):
# Helper function for LBintersection
if axis == 1 and hit[1] > B1[1] and hit[1] < B2[1] and hit[2] > B1[2] and hit[2] < B2[2]:
return True
if axis == 2 and hit[0] > B1[0] and hit[0] < B2[0] and hit[2] > B1[2] and hit[2] < B2[2]:
return True
if axis == 3 and hit[0] > B1[0] and hit[0] < B2[0] and hit[1] > B1[1] and hit[1] < B2[1]:
return True
return False
def LBintersection(L1, L2, B1, B2):
    # Determines whether the line segment defined by points L1 and L2 intersects the axis-aligned box defined by its minimal and maximal corners B1 and B2.
if (L1[0] < B1[0] and L2[0] < B1[0]) or (L1[0] > B2[0] and L2[0] > B2[0]):
return False
if (L1[1] < B1[1] and L2[1] < B1[1]) or (L1[1] > B2[1] and L2[1] > B2[1]):
return False
if (L1[2] < B1[2] and L2[2] < B1[2]) or (L1[2] > B2[2] and L2[2] > B2[2]):
return False
hit = get_intersection(L1[0]-B1[0], L2[0]-B1[0], L1, L2)
if hit is not None and in_box(hit, B1, B2, 1):
return True
hit = get_intersection(L1[1]-B1[1], L2[1]-B1[1], L1, L2)
if hit is not None and in_box(hit, B1, B2, 2):
return True
hit = get_intersection(L1[2]-B1[2], L2[2]-B1[2], L1, L2)
if hit is not None and in_box(hit, B1, B2, 3):
return True
hit = get_intersection(L1[0]-B2[0], L2[0]-B2[0], L1, L2)
if hit is not None and in_box(hit, B1, B2, 1):
return True
hit = get_intersection(L1[1]-B2[1], L2[1]-B2[1], L1, L2)
if hit is not None and in_box(hit, B1, B2, 2):
return True
hit = get_intersection(L1[2]-B2[2], L2[2]-B2[2], L1, L2)
if hit is not None and in_box(hit, B1, B2, 3):
return True
return False
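# Example (hedged): a segment passing through the unit box around the origin is
# reported as intersecting, while a segment far outside it is not.
#
#     seg_a, seg_b = np.array([-2., 0., 0.]), np.array([2., 0., 0.])
#     LBintersection(seg_a, seg_b, [-1., -1., -1.], [1., 1., 1.])   # True
#     LBintersection(np.array([5., 5., 5.]), np.array([6., 6., 6.]),
#                    [-1., -1., -1.], [1., 1., 1.])                 # False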
partition_coords = [[i,j,k] for i in range(int(box[0]//ps)) for j in range(int(box[1]//ps)) for k in range(int(box[2]//ps))]
partition_ray_dicts = [{} for _ in range(len(partition_coords))]
# List of pairs of coordinates, minimal and maximal, defining each box in the partition
box_coords = []
for coords in partition_coords:
B1 = [float(coords[0]*ps - box[0]/2 + box_center[0]),
float(coords[1]*ps - box[1]/2 + box_center[1]),
float(coords[2]*ps - box[2]/2 + box_center[2])]
B2 = [float((coords[0] + 1)*ps - box[0]/2 + box_center[0]),
float((coords[1] + 1)*ps - box[1]/2 + box_center[1]),
float((coords[2] + 1)*ps - box[2]/2 + box_center[2])]
box_coords.append([B1, B2])
# For each camera and each box in the partition, make a dictionary containing all rays passing through that box
print('Getting rays in volume partitions.')
os.makedirs('partitions', exist_ok=True)
for cam, ray_dict in enumerate(rays_in_jar_dict):
for i, [B1, B2] in enumerate(box_coords):
rays_in_box_dict = {}
for ray_index in ray_dict:
L1, L2 = ray_dict[ray_index]
if LBintersection(L1, L2, B1, B2):
rays_in_box_dict.update( {ray_index : [L1,L2]} )
with open('partitions/cam%ibox%i.pkl' % (cam,i), 'wb') as f:
pickle.dump(rays_in_box_dict, f)
print('Wrote file partitions/cam%ibox%i.pkl.' % (cam,i))
|
py | b411ec5b542c567d2708ad5593b85254c500ce2a | from django.contrib.auth.models import User
from rest_framework import serializers
from challenges.models import LeaderboardData
from participants.models import Participant, ParticipantTeam
from .models import Submission
class SubmissionSerializer(serializers.ModelSerializer):
participant_team_name = serializers.SerializerMethodField()
execution_time = serializers.SerializerMethodField()
def __init__(self, *args, **kwargs):
context = kwargs.get('context')
if context and context.get('request').method == 'POST':
created_by = context.get('request').user
kwargs['data']['created_by'] = created_by.pk
participant_team = context.get('participant_team').pk
kwargs['data']['participant_team'] = participant_team
challenge_phase = context.get('challenge_phase').pk
kwargs['data']['challenge_phase'] = challenge_phase
super(SubmissionSerializer, self).__init__(*args, **kwargs)
class Meta:
model = Submission
fields = ('id', 'participant_team', 'participant_team_name', 'execution_time', 'challenge_phase',
'created_by', 'status', 'input_file', 'stdout_file', 'stderr_file', 'submitted_at',
'method_name', 'method_description', 'project_url', 'publication_url', 'is_public',
'submission_result_file', 'when_made_public',)
def get_participant_team_name(self, obj):
return obj.participant_team.team_name
def get_execution_time(self, obj):
return obj.execution_time
class LeaderboardDataSerializer(serializers.ModelSerializer):
participant_team_name = serializers.SerializerMethodField()
leaderboard_schema = serializers.SerializerMethodField()
def __init__(self, *args, **kwargs):
super(LeaderboardDataSerializer, self).__init__(*args, **kwargs)
class Meta:
model = LeaderboardData
fields = "__all__"
fields = ('id', 'participant_team_name', 'challenge_phase_split', 'leaderboard_schema', 'result')
def get_participant_team_name(self, obj):
return obj.submission.participant_team.team_name
def get_leaderboard_schema(self, obj):
return obj.leaderboard.schema
class ChallengeSubmissionManagementSerializer(serializers.ModelSerializer):
participant_team = serializers.SerializerMethodField()
challenge_phase = serializers.SerializerMethodField()
created_by = serializers.SerializerMethodField()
participant_team_members_email_ids = serializers.SerializerMethodField()
created_at = serializers.SerializerMethodField()
participant_team_members = serializers.SerializerMethodField()
class Meta:
model = Submission
fields = ('id', 'participant_team', 'challenge_phase', 'created_by', 'status', 'is_public',
'submission_number', 'submitted_at', 'execution_time', 'input_file', 'stdout_file',
'stderr_file', 'submission_result_file', 'submission_metadata_file',
'participant_team_members_email_ids', 'created_at', 'method_name', 'participant_team_members',)
def get_participant_team(self, obj):
return obj.participant_team.team_name
def get_challenge_phase(self, obj):
return obj.challenge_phase.name
def get_created_by(self, obj):
return obj.created_by.username
def get_participant_team_members_email_ids(self, obj):
try:
participant_team = ParticipantTeam.objects.get(team_name=obj.participant_team.team_name)
except ParticipantTeam.DoesNotExist:
return 'Participant team does not exist'
participant_ids = Participant.objects.filter(team=participant_team).values_list('user_id', flat=True)
return list(User.objects.filter(id__in=participant_ids).values_list('email', flat=True))
def get_created_at(self, obj):
return obj.created_at
def get_participant_team_members(self, obj):
try:
participant_team = ParticipantTeam.objects.get(team_name=obj.participant_team.team_name)
except ParticipantTeam.DoesNotExist:
return 'Participant team does not exist'
participant_ids = Participant.objects.filter(team=participant_team).values_list('user_id', flat=True)
return list(User.objects.filter(id__in=participant_ids).values('username', 'email'))
class SubmissionCount(object):
def __init__(self, submission_count):
self.submission_count = submission_count
class SubmissionCountSerializer(serializers.Serializer):
submission_count = serializers.IntegerField()
class LastSubmissionDateTime(object):
def __init__(self, last_submission_datetime):
self.last_submission_datetime = last_submission_datetime
class LastSubmissionDateTimeSerializer(serializers.Serializer):
last_submission_datetime = serializers.DateTimeField()
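# Example (hedged): the plain wrapper objects above let scalar values be
# rendered through DRF serializers, e.g.
#
#     SubmissionCountSerializer(SubmissionCount(42)).data
#     # -> {'submission_count': 42}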
class CreateLeaderboardDataSerializer(serializers.ModelSerializer):
def __init__(self, *args, **kwargs):
context = kwargs.get('context')
if context and context.get('request').method == 'PUT':
challenge_phase_split = context.get('challenge_phase_split')
kwargs['data']['challenge_phase_split'] = challenge_phase_split.pk
submission = context.get('submission').pk
kwargs['data']['submission'] = submission
kwargs['data']['leaderboard'] = challenge_phase_split.leaderboard.pk
super(CreateLeaderboardDataSerializer, self).__init__(*args, **kwargs)
class Meta:
model = LeaderboardData
fields = ('challenge_phase_split', 'submission', 'result', 'leaderboard')
|
py | b411ece15d2b3ad8c64254e6d9c0a452eb8897ad | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._replicas_operations import build_list_by_server_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ReplicasOperations:
"""ReplicasOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.rdbms.mysql.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list_by_server(
self,
resource_group_name: str,
server_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ServerListResult"]:
"""List all the replicas for a given server.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ServerListResult or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.rdbms.mysql.models.ServerListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServerListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_server_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
server_name=server_name,
template_url=self.list_by_server.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_server_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
server_name=server_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ServerListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_server.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMySQL/servers/{serverName}/replicas'} # type: ignore
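# Example (hedged): typical usage through an async management client; the
# client construction below is illustrative and not part of this module.
#
#     client = MySQLManagementClient(credential, subscription_id)
#     async for replica in client.replicas.list_by_server('my-rg', 'my-server'):
#         print(replica.name)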
|
py | b411ee57f2fdb19ffb2e8eb07085d860dbf58e03 | # Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for minimization."""
import io
from Bio import PDB
import numpy as np
from simtk.openmm import app as openmm_app
from simtk.openmm.app.internal.pdbstructure import PdbStructure
from alphafold.common import residue_constants
def overwrite_pdb_coordinates(pdb_str: str, pos) -> str:
pdb_file = io.StringIO(pdb_str)
structure = PdbStructure(pdb_file)
topology = openmm_app.PDBFile(structure).getTopology()
with io.StringIO() as f:
openmm_app.PDBFile.writeFile(topology, pos, f)
return f.getvalue()
def overwrite_b_factors(pdb_str: str, bfactors: np.ndarray) -> str:
"""Overwrites the B-factors in pdb_str with contents of bfactors array.
Args:
pdb_str: An input PDB string.
bfactors: A numpy array with shape [1, n_residues, 37]. We assume that the
B-factors are per residue; i.e. that the nonzero entries are identical in
[0, i, :].
Returns:
A new PDB string with the B-factors replaced.
"""
if bfactors.shape[-1] != residue_constants.atom_type_num:
raise ValueError(
f'Invalid final dimension size for bfactors: {bfactors.shape[-1]}.')
parser = PDB.PDBParser()
handle = io.StringIO(pdb_str)
structure = parser.get_structure('', handle)
curr_resid = ('', '', '')
idx = -1
for atom in structure.get_atoms():
atom_resid = atom.parent.get_id()
if atom_resid != curr_resid:
idx += 1
if idx >= bfactors.shape[0]:
        raise ValueError('Index into bfactors exceeds number of residues. '
                         f'B-factors shape: {bfactors.shape}, idx: {idx}.')
curr_resid = atom_resid
atom.bfactor = bfactors[idx, residue_constants.atom_order['CA']]
new_pdb = io.StringIO()
pdb_io = PDB.PDBIO()
pdb_io.set_structure(structure)
pdb_io.save(new_pdb)
return new_pdb.getvalue()
def assert_equal_nonterminal_atom_types(
atom_mask: np.ndarray, ref_atom_mask: np.ndarray):
"""Checks that pre- and post-minimized proteins have same atom set."""
# Ignore any terminal OXT atoms which may have been added by minimization.
oxt = residue_constants.atom_order['OXT']
  no_oxt_mask = np.ones(shape=atom_mask.shape, dtype=bool)
no_oxt_mask[..., oxt] = False
np.testing.assert_almost_equal(ref_atom_mask[no_oxt_mask],
atom_mask[no_oxt_mask])
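# Example (hedged): a common use of overwrite_b_factors is to write per-residue
# confidence values into the B-factor column; `pdb_str` and `plddt` below are
# assumed inputs (a PDB string and an array of shape [num_res]).
#
#     bfactors = np.repeat(plddt[:, None], residue_constants.atom_type_num, axis=-1)
#     pdb_with_confidence = overwrite_b_factors(pdb_str, bfactors)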
|
py | b411ee683cec811180dc3f1116580fe917882311 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayTradeCreateResponse(AlipayResponse):
def __init__(self):
super(AlipayTradeCreateResponse, self).__init__()
self._out_trade_no = None
self._trade_no = None
@property
def out_trade_no(self):
return self._out_trade_no
@out_trade_no.setter
def out_trade_no(self, value):
self._out_trade_no = value
@property
def trade_no(self):
return self._trade_no
@trade_no.setter
def trade_no(self, value):
self._trade_no = value
def parse_response_content(self, response_content):
response = super(AlipayTradeCreateResponse, self).parse_response_content(response_content)
if 'out_trade_no' in response:
self.out_trade_no = response['out_trade_no']
if 'trade_no' in response:
self.trade_no = response['trade_no']
|
py | b411ee6f57a1e7fe7ce9b44974d057174d31cef4 | """CLI to upload data to a MetaGenScope Server."""
from sys import stderr
import click
from .utils import add_authorization
@click.group()
def manage():
"""Manage MetaGenScope data."""
pass
@manage.command()
@add_authorization()
@click.argument('group_uuid')
def delete_group(uploader, group_uuid):
"""Delete a sample group by uuid."""
try:
response = uploader.knex.delete(f'/api/v1/sample_groups/{group_uuid}')
click.echo(response)
except Exception: # pylint:disable=broad-except
print(f'[manage-delete_group-error] {group_uuid}', file=stderr)
|
py | b411ef10c360a9a4a61fac8205399a88979d1ebe | # twitch-osu-bot documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'twitch-osu-bot'
copyright = """2017, Peter Rowlands"""
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'twitch_osu_botdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'twitch_osu_bot.tex',
'twitch-osu-bot Documentation',
"""Peter Rowlands""", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'twitch_osu_bot', 'twitch-osu-bot Documentation',
["""Peter Rowlands"""], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'twitch_osu_bot', 'twitch-osu-bot Documentation',
"""Peter Rowlands""", 'twitch-osu-bot',
"""Osu! Twitch (+Bancho) chat bot.""", 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
|
py | b411efc32528fdedec4928f44db6759e4c136305 | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '2c$o0dw^k+#qnyauoso8r-eincz8b#(kqbk8-)%k-da4=24)dz'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
|
py | b411f0f6f769d8ae9ae0d94861a0b7cc34459304 | from abc import abstractmethod
from enum import Enum
from ingest.importer.conversion.exceptions import InvalidBooleanValue
class DataType(Enum):
STRING = 'string'
INTEGER = 'integer'
BOOLEAN = 'boolean'
NUMBER = 'number'
UNDEFINED = 'undefined'
@staticmethod
def find(value: str):
data_type = DataType.UNDEFINED
if value is not None:
try:
data_type = DataType(value.lower())
except ValueError:
pass
return data_type
class Converter:
@abstractmethod
def convert(self, data):
raise NotImplementedError()
class DefaultConverter(Converter):
def convert(self, data):
return data
class StringConverter(Converter):
def convert(self, data):
return str(data).strip()
class IntegerConverter(Converter):
def convert(self, data):
return int(data)
class NumberConverter(Converter):
def convert(self, data):
return float(data)
BOOLEAN_TABLE = {
'true': True,
'yes': True,
'false': False,
'no': False
}
class BooleanConverter(Converter):
def convert(self, data):
value = BOOLEAN_TABLE.get(data.lower())
if value is None:
raise InvalidBooleanValue(data)
return value
CONVERTER_MAP = {
DataType.STRING: StringConverter(),
DataType.INTEGER: IntegerConverter(),
DataType.BOOLEAN: BooleanConverter(),
DataType.NUMBER: NumberConverter()
}
class ListConverter(Converter):
def __init__(self, data_type: DataType = DataType.STRING, base_converter: Converter = None):
self.base_type = data_type
if base_converter is not None:
self.base_type = DataType.UNDEFINED
self.base_converter = base_converter
else:
self.base_converter = CONVERTER_MAP.get(data_type, CONVERTER_MAP[DataType.STRING])
def convert(self, data):
data = str(data)
value = data.split('||')
value = [self.base_converter.convert(elem) for elem in value]
return value
DEFAULT = DefaultConverter()
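# Example (hedged): converting '||'-delimited spreadsheet cell values into
# typed Python values using the converters above.
#
#     ListConverter(DataType.INTEGER).convert('1||2||3')   # -> [1, 2, 3]
#     BooleanConverter().convert('Yes')                     # -> True
#     DataType.find('Integer')                              # -> DataType.INTEGER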
|
py | b411f152f0fce0e114412e76b77ea38a3b0adb51 | import cv2
import sys
video = cv2.VideoCapture(0)
Fpath = 'haarcascade_frontalface_default.xml'
Epath = 'haarcascade_eye.xml'
a = 1
while True:
a = a+1
check, frame = video.read()
print(frame)
face_cascade = cv2.CascadeClassifier(Fpath)
eye_cascade = cv2.CascadeClassifier(Epath)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
face = face_cascade.detectMultiScale(gray, scaleFactor = 1.005, minNeighbors = 5, minSize = (5, 5), flags = cv2.CASCADE_SCALE_IMAGE)
eye = eye_cascade.detectMultiScale(gray, scaleFactor = 1.005, minNeighbors = 10, minSize = (5, 5), flags = cv2.CASCADE_SCALE_IMAGE)
for x, y, w, h in face:
cv2.rectangle(gray, (x, y), (x+w, y+h), (255, 0, 0), 3)
for ex, ey, ew, eh in eye:
cv2.rectangle(gray, (ex, ey), (ex+ew, ey+eh), (0, 0, 255), 3)
cv2.imshow('Captured', gray)
key = cv2.waitKey(2)
if key == ord('q'):
break
video.release()
cv2.destroyAllWindows()
|