max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
mc-sema/protobuf-2.5.0/python/google/protobuf/internal/test_util.py | randolphwong/mcsema | 252 | 11073043 | <gh_stars>100-1000
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Utilities for Python proto2 tests.
This is intentionally modeled on C++ code in
//google/protobuf/test_util.*.
"""
__author__ = '<EMAIL> (<NAME>)'
import os.path
from google.protobuf import unittest_import_pb2
from google.protobuf import unittest_pb2
def SetAllNonLazyFields(message):
"""Sets every non-lazy field in the message to a unique value.
Args:
message: A unittest_pb2.TestAllTypes instance.
"""
#
# Optional fields.
#
message.optional_int32 = 101
message.optional_int64 = 102
message.optional_uint32 = 103
message.optional_uint64 = 104
message.optional_sint32 = 105
message.optional_sint64 = 106
message.optional_fixed32 = 107
message.optional_fixed64 = 108
message.optional_sfixed32 = 109
message.optional_sfixed64 = 110
message.optional_float = 111
message.optional_double = 112
message.optional_bool = True
# TODO(robinson): Firmly spec out and test how
# protos interact with unicode. One specific example:
# what happens if we change the literal below to
# u'115'? What *should* happen? Still some discussion
# to finish with Kenton about bytes vs. strings
# and forcing everything to be utf8. :-/
message.optional_string = '115'
message.optional_bytes = '116'
message.optionalgroup.a = 117
message.optional_nested_message.bb = 118
message.optional_foreign_message.c = 119
message.optional_import_message.d = 120
message.optional_public_import_message.e = 126
message.optional_nested_enum = unittest_pb2.TestAllTypes.BAZ
message.optional_foreign_enum = unittest_pb2.FOREIGN_BAZ
message.optional_import_enum = unittest_import_pb2.IMPORT_BAZ
message.optional_string_piece = '124'
message.optional_cord = '125'
#
# Repeated fields.
#
message.repeated_int32.append(201)
message.repeated_int64.append(202)
message.repeated_uint32.append(203)
message.repeated_uint64.append(204)
message.repeated_sint32.append(205)
message.repeated_sint64.append(206)
message.repeated_fixed32.append(207)
message.repeated_fixed64.append(208)
message.repeated_sfixed32.append(209)
message.repeated_sfixed64.append(210)
message.repeated_float.append(211)
message.repeated_double.append(212)
message.repeated_bool.append(True)
message.repeated_string.append('215')
message.repeated_bytes.append('216')
message.repeatedgroup.add().a = 217
message.repeated_nested_message.add().bb = 218
message.repeated_foreign_message.add().c = 219
message.repeated_import_message.add().d = 220
message.repeated_lazy_message.add().bb = 227
message.repeated_nested_enum.append(unittest_pb2.TestAllTypes.BAR)
message.repeated_foreign_enum.append(unittest_pb2.FOREIGN_BAR)
message.repeated_import_enum.append(unittest_import_pb2.IMPORT_BAR)
message.repeated_string_piece.append('224')
message.repeated_cord.append('225')
# Add a second one of each field.
message.repeated_int32.append(301)
message.repeated_int64.append(302)
message.repeated_uint32.append(303)
message.repeated_uint64.append(304)
message.repeated_sint32.append(305)
message.repeated_sint64.append(306)
message.repeated_fixed32.append(307)
message.repeated_fixed64.append(308)
message.repeated_sfixed32.append(309)
message.repeated_sfixed64.append(310)
message.repeated_float.append(311)
message.repeated_double.append(312)
message.repeated_bool.append(False)
message.repeated_string.append('315')
message.repeated_bytes.append('316')
message.repeatedgroup.add().a = 317
message.repeated_nested_message.add().bb = 318
message.repeated_foreign_message.add().c = 319
message.repeated_import_message.add().d = 320
message.repeated_lazy_message.add().bb = 327
message.repeated_nested_enum.append(unittest_pb2.TestAllTypes.BAZ)
message.repeated_foreign_enum.append(unittest_pb2.FOREIGN_BAZ)
message.repeated_import_enum.append(unittest_import_pb2.IMPORT_BAZ)
message.repeated_string_piece.append('324')
message.repeated_cord.append('325')
#
# Fields that have defaults.
#
message.default_int32 = 401
message.default_int64 = 402
message.default_uint32 = 403
message.default_uint64 = 404
message.default_sint32 = 405
message.default_sint64 = 406
message.default_fixed32 = 407
message.default_fixed64 = 408
message.default_sfixed32 = 409
message.default_sfixed64 = 410
message.default_float = 411
message.default_double = 412
message.default_bool = False
message.default_string = '415'
message.default_bytes = '416'
message.default_nested_enum = unittest_pb2.TestAllTypes.FOO
message.default_foreign_enum = unittest_pb2.FOREIGN_FOO
message.default_import_enum = unittest_import_pb2.IMPORT_FOO
message.default_string_piece = '424'
message.default_cord = '425'
def SetAllFields(message):
SetAllNonLazyFields(message)
message.optional_lazy_message.bb = 127
def SetAllExtensions(message):
"""Sets every extension in the message to a unique value.
Args:
message: A unittest_pb2.TestAllExtensions instance.
"""
extensions = message.Extensions
pb2 = unittest_pb2
import_pb2 = unittest_import_pb2
#
# Optional fields.
#
extensions[pb2.optional_int32_extension] = 101
extensions[pb2.optional_int64_extension] = 102
extensions[pb2.optional_uint32_extension] = 103
extensions[pb2.optional_uint64_extension] = 104
extensions[pb2.optional_sint32_extension] = 105
extensions[pb2.optional_sint64_extension] = 106
extensions[pb2.optional_fixed32_extension] = 107
extensions[pb2.optional_fixed64_extension] = 108
extensions[pb2.optional_sfixed32_extension] = 109
extensions[pb2.optional_sfixed64_extension] = 110
extensions[pb2.optional_float_extension] = 111
extensions[pb2.optional_double_extension] = 112
extensions[pb2.optional_bool_extension] = True
extensions[pb2.optional_string_extension] = '115'
extensions[pb2.optional_bytes_extension] = '116'
extensions[pb2.optionalgroup_extension].a = 117
extensions[pb2.optional_nested_message_extension].bb = 118
extensions[pb2.optional_foreign_message_extension].c = 119
extensions[pb2.optional_import_message_extension].d = 120
extensions[pb2.optional_public_import_message_extension].e = 126
extensions[pb2.optional_lazy_message_extension].bb = 127
extensions[pb2.optional_nested_enum_extension] = pb2.TestAllTypes.BAZ
extensions[pb2.optional_foreign_enum_extension] = pb2.FOREIGN_BAZ
extensions[pb2.optional_import_enum_extension] = import_pb2.IMPORT_BAZ
extensions[pb2.optional_string_piece_extension] = '124'
extensions[pb2.optional_cord_extension] = '125'
#
# Repeated fields.
#
extensions[pb2.repeated_int32_extension].append(201)
extensions[pb2.repeated_int64_extension].append(202)
extensions[pb2.repeated_uint32_extension].append(203)
extensions[pb2.repeated_uint64_extension].append(204)
extensions[pb2.repeated_sint32_extension].append(205)
extensions[pb2.repeated_sint64_extension].append(206)
extensions[pb2.repeated_fixed32_extension].append(207)
extensions[pb2.repeated_fixed64_extension].append(208)
extensions[pb2.repeated_sfixed32_extension].append(209)
extensions[pb2.repeated_sfixed64_extension].append(210)
extensions[pb2.repeated_float_extension].append(211)
extensions[pb2.repeated_double_extension].append(212)
extensions[pb2.repeated_bool_extension].append(True)
extensions[pb2.repeated_string_extension].append('215')
extensions[pb2.repeated_bytes_extension].append('216')
extensions[pb2.repeatedgroup_extension].add().a = 217
extensions[pb2.repeated_nested_message_extension].add().bb = 218
extensions[pb2.repeated_foreign_message_extension].add().c = 219
extensions[pb2.repeated_import_message_extension].add().d = 220
extensions[pb2.repeated_lazy_message_extension].add().bb = 227
extensions[pb2.repeated_nested_enum_extension].append(pb2.TestAllTypes.BAR)
extensions[pb2.repeated_foreign_enum_extension].append(pb2.FOREIGN_BAR)
extensions[pb2.repeated_import_enum_extension].append(import_pb2.IMPORT_BAR)
extensions[pb2.repeated_string_piece_extension].append('224')
extensions[pb2.repeated_cord_extension].append('225')
# Append a second one of each field.
extensions[pb2.repeated_int32_extension].append(301)
extensions[pb2.repeated_int64_extension].append(302)
extensions[pb2.repeated_uint32_extension].append(303)
extensions[pb2.repeated_uint64_extension].append(304)
extensions[pb2.repeated_sint32_extension].append(305)
extensions[pb2.repeated_sint64_extension].append(306)
extensions[pb2.repeated_fixed32_extension].append(307)
extensions[pb2.repeated_fixed64_extension].append(308)
extensions[pb2.repeated_sfixed32_extension].append(309)
extensions[pb2.repeated_sfixed64_extension].append(310)
extensions[pb2.repeated_float_extension].append(311)
extensions[pb2.repeated_double_extension].append(312)
extensions[pb2.repeated_bool_extension].append(False)
extensions[pb2.repeated_string_extension].append('315')
extensions[pb2.repeated_bytes_extension].append('316')
extensions[pb2.repeatedgroup_extension].add().a = 317
extensions[pb2.repeated_nested_message_extension].add().bb = 318
extensions[pb2.repeated_foreign_message_extension].add().c = 319
extensions[pb2.repeated_import_message_extension].add().d = 320
extensions[pb2.repeated_lazy_message_extension].add().bb = 327
extensions[pb2.repeated_nested_enum_extension].append(pb2.TestAllTypes.BAZ)
extensions[pb2.repeated_foreign_enum_extension].append(pb2.FOREIGN_BAZ)
extensions[pb2.repeated_import_enum_extension].append(import_pb2.IMPORT_BAZ)
extensions[pb2.repeated_string_piece_extension].append('324')
extensions[pb2.repeated_cord_extension].append('325')
#
# Fields with defaults.
#
extensions[pb2.default_int32_extension] = 401
extensions[pb2.default_int64_extension] = 402
extensions[pb2.default_uint32_extension] = 403
extensions[pb2.default_uint64_extension] = 404
extensions[pb2.default_sint32_extension] = 405
extensions[pb2.default_sint64_extension] = 406
extensions[pb2.default_fixed32_extension] = 407
extensions[pb2.default_fixed64_extension] = 408
extensions[pb2.default_sfixed32_extension] = 409
extensions[pb2.default_sfixed64_extension] = 410
extensions[pb2.default_float_extension] = 411
extensions[pb2.default_double_extension] = 412
extensions[pb2.default_bool_extension] = False
extensions[pb2.default_string_extension] = '415'
extensions[pb2.default_bytes_extension] = '416'
extensions[pb2.default_nested_enum_extension] = pb2.TestAllTypes.FOO
extensions[pb2.default_foreign_enum_extension] = pb2.FOREIGN_FOO
extensions[pb2.default_import_enum_extension] = import_pb2.IMPORT_FOO
extensions[pb2.default_string_piece_extension] = '424'
extensions[pb2.default_cord_extension] = '425'
def SetAllFieldsAndExtensions(message):
"""Sets every field and extension in the message to a unique value.
Args:
message: A unittest_pb2.TestAllExtensions message.
"""
message.my_int = 1
message.my_string = 'foo'
message.my_float = 1.0
message.Extensions[unittest_pb2.my_extension_int] = 23
message.Extensions[unittest_pb2.my_extension_string] = 'bar'
def ExpectAllFieldsAndExtensionsInOrder(serialized):
"""Ensures that serialized is the serialization we expect for a message
filled with SetAllFieldsAndExtensions(). (Specifically, ensures that the
serialization is in canonical, tag-number order).
"""
my_extension_int = unittest_pb2.my_extension_int
my_extension_string = unittest_pb2.my_extension_string
expected_strings = []
message = unittest_pb2.TestFieldOrderings()
message.my_int = 1 # Field 1.
expected_strings.append(message.SerializeToString())
message.Clear()
message.Extensions[my_extension_int] = 23 # Field 5.
expected_strings.append(message.SerializeToString())
message.Clear()
message.my_string = 'foo' # Field 11.
expected_strings.append(message.SerializeToString())
message.Clear()
message.Extensions[my_extension_string] = 'bar' # Field 50.
expected_strings.append(message.SerializeToString())
message.Clear()
message.my_float = 1.0
expected_strings.append(message.SerializeToString())
message.Clear()
expected = ''.join(expected_strings)
if expected != serialized:
raise ValueError('Expected %r, found %r' % (expected, serialized))
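# Illustrative sketch (not part of the original file; the function name below is
# only an example): a typical caller fills a TestFieldOrderings message with
# SetAllFieldsAndExtensions() and then checks that its serialization really is
# in canonical tag-number order.
def _ExampleCheckFieldOrdering():
  ordering_message = unittest_pb2.TestFieldOrderings()
  SetAllFieldsAndExtensions(ordering_message)
  ExpectAllFieldsAndExtensionsInOrder(ordering_message.SerializeToString())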
def ExpectAllFieldsSet(test_case, message):
"""Check all fields for correct values have after Set*Fields() is called."""
test_case.assertTrue(message.HasField('optional_int32'))
test_case.assertTrue(message.HasField('optional_int64'))
test_case.assertTrue(message.HasField('optional_uint32'))
test_case.assertTrue(message.HasField('optional_uint64'))
test_case.assertTrue(message.HasField('optional_sint32'))
test_case.assertTrue(message.HasField('optional_sint64'))
test_case.assertTrue(message.HasField('optional_fixed32'))
test_case.assertTrue(message.HasField('optional_fixed64'))
test_case.assertTrue(message.HasField('optional_sfixed32'))
test_case.assertTrue(message.HasField('optional_sfixed64'))
test_case.assertTrue(message.HasField('optional_float'))
test_case.assertTrue(message.HasField('optional_double'))
test_case.assertTrue(message.HasField('optional_bool'))
test_case.assertTrue(message.HasField('optional_string'))
test_case.assertTrue(message.HasField('optional_bytes'))
test_case.assertTrue(message.HasField('optionalgroup'))
test_case.assertTrue(message.HasField('optional_nested_message'))
test_case.assertTrue(message.HasField('optional_foreign_message'))
test_case.assertTrue(message.HasField('optional_import_message'))
test_case.assertTrue(message.optionalgroup.HasField('a'))
test_case.assertTrue(message.optional_nested_message.HasField('bb'))
test_case.assertTrue(message.optional_foreign_message.HasField('c'))
test_case.assertTrue(message.optional_import_message.HasField('d'))
test_case.assertTrue(message.HasField('optional_nested_enum'))
test_case.assertTrue(message.HasField('optional_foreign_enum'))
test_case.assertTrue(message.HasField('optional_import_enum'))
test_case.assertTrue(message.HasField('optional_string_piece'))
test_case.assertTrue(message.HasField('optional_cord'))
test_case.assertEqual(101, message.optional_int32)
test_case.assertEqual(102, message.optional_int64)
test_case.assertEqual(103, message.optional_uint32)
test_case.assertEqual(104, message.optional_uint64)
test_case.assertEqual(105, message.optional_sint32)
test_case.assertEqual(106, message.optional_sint64)
test_case.assertEqual(107, message.optional_fixed32)
test_case.assertEqual(108, message.optional_fixed64)
test_case.assertEqual(109, message.optional_sfixed32)
test_case.assertEqual(110, message.optional_sfixed64)
test_case.assertEqual(111, message.optional_float)
test_case.assertEqual(112, message.optional_double)
test_case.assertEqual(True, message.optional_bool)
test_case.assertEqual('115', message.optional_string)
test_case.assertEqual('116', message.optional_bytes)
test_case.assertEqual(117, message.optionalgroup.a)
test_case.assertEqual(118, message.optional_nested_message.bb)
test_case.assertEqual(119, message.optional_foreign_message.c)
test_case.assertEqual(120, message.optional_import_message.d)
test_case.assertEqual(126, message.optional_public_import_message.e)
test_case.assertEqual(127, message.optional_lazy_message.bb)
test_case.assertEqual(unittest_pb2.TestAllTypes.BAZ,
message.optional_nested_enum)
test_case.assertEqual(unittest_pb2.FOREIGN_BAZ,
message.optional_foreign_enum)
test_case.assertEqual(unittest_import_pb2.IMPORT_BAZ,
message.optional_import_enum)
# -----------------------------------------------------------------
test_case.assertEqual(2, len(message.repeated_int32))
test_case.assertEqual(2, len(message.repeated_int64))
test_case.assertEqual(2, len(message.repeated_uint32))
test_case.assertEqual(2, len(message.repeated_uint64))
test_case.assertEqual(2, len(message.repeated_sint32))
test_case.assertEqual(2, len(message.repeated_sint64))
test_case.assertEqual(2, len(message.repeated_fixed32))
test_case.assertEqual(2, len(message.repeated_fixed64))
test_case.assertEqual(2, len(message.repeated_sfixed32))
test_case.assertEqual(2, len(message.repeated_sfixed64))
test_case.assertEqual(2, len(message.repeated_float))
test_case.assertEqual(2, len(message.repeated_double))
test_case.assertEqual(2, len(message.repeated_bool))
test_case.assertEqual(2, len(message.repeated_string))
test_case.assertEqual(2, len(message.repeated_bytes))
test_case.assertEqual(2, len(message.repeatedgroup))
test_case.assertEqual(2, len(message.repeated_nested_message))
test_case.assertEqual(2, len(message.repeated_foreign_message))
test_case.assertEqual(2, len(message.repeated_import_message))
test_case.assertEqual(2, len(message.repeated_nested_enum))
test_case.assertEqual(2, len(message.repeated_foreign_enum))
test_case.assertEqual(2, len(message.repeated_import_enum))
test_case.assertEqual(2, len(message.repeated_string_piece))
test_case.assertEqual(2, len(message.repeated_cord))
test_case.assertEqual(201, message.repeated_int32[0])
test_case.assertEqual(202, message.repeated_int64[0])
test_case.assertEqual(203, message.repeated_uint32[0])
test_case.assertEqual(204, message.repeated_uint64[0])
test_case.assertEqual(205, message.repeated_sint32[0])
test_case.assertEqual(206, message.repeated_sint64[0])
test_case.assertEqual(207, message.repeated_fixed32[0])
test_case.assertEqual(208, message.repeated_fixed64[0])
test_case.assertEqual(209, message.repeated_sfixed32[0])
test_case.assertEqual(210, message.repeated_sfixed64[0])
test_case.assertEqual(211, message.repeated_float[0])
test_case.assertEqual(212, message.repeated_double[0])
test_case.assertEqual(True, message.repeated_bool[0])
test_case.assertEqual('215', message.repeated_string[0])
test_case.assertEqual('216', message.repeated_bytes[0])
test_case.assertEqual(217, message.repeatedgroup[0].a)
test_case.assertEqual(218, message.repeated_nested_message[0].bb)
test_case.assertEqual(219, message.repeated_foreign_message[0].c)
test_case.assertEqual(220, message.repeated_import_message[0].d)
test_case.assertEqual(227, message.repeated_lazy_message[0].bb)
test_case.assertEqual(unittest_pb2.TestAllTypes.BAR,
message.repeated_nested_enum[0])
test_case.assertEqual(unittest_pb2.FOREIGN_BAR,
message.repeated_foreign_enum[0])
test_case.assertEqual(unittest_import_pb2.IMPORT_BAR,
message.repeated_import_enum[0])
test_case.assertEqual(301, message.repeated_int32[1])
test_case.assertEqual(302, message.repeated_int64[1])
test_case.assertEqual(303, message.repeated_uint32[1])
test_case.assertEqual(304, message.repeated_uint64[1])
test_case.assertEqual(305, message.repeated_sint32[1])
test_case.assertEqual(306, message.repeated_sint64[1])
test_case.assertEqual(307, message.repeated_fixed32[1])
test_case.assertEqual(308, message.repeated_fixed64[1])
test_case.assertEqual(309, message.repeated_sfixed32[1])
test_case.assertEqual(310, message.repeated_sfixed64[1])
test_case.assertEqual(311, message.repeated_float[1])
test_case.assertEqual(312, message.repeated_double[1])
test_case.assertEqual(False, message.repeated_bool[1])
test_case.assertEqual('315', message.repeated_string[1])
test_case.assertEqual('316', message.repeated_bytes[1])
test_case.assertEqual(317, message.repeatedgroup[1].a)
test_case.assertEqual(318, message.repeated_nested_message[1].bb)
test_case.assertEqual(319, message.repeated_foreign_message[1].c)
test_case.assertEqual(320, message.repeated_import_message[1].d)
test_case.assertEqual(327, message.repeated_lazy_message[1].bb)
test_case.assertEqual(unittest_pb2.TestAllTypes.BAZ,
message.repeated_nested_enum[1])
test_case.assertEqual(unittest_pb2.FOREIGN_BAZ,
message.repeated_foreign_enum[1])
test_case.assertEqual(unittest_import_pb2.IMPORT_BAZ,
message.repeated_import_enum[1])
# -----------------------------------------------------------------
test_case.assertTrue(message.HasField('default_int32'))
test_case.assertTrue(message.HasField('default_int64'))
test_case.assertTrue(message.HasField('default_uint32'))
test_case.assertTrue(message.HasField('default_uint64'))
test_case.assertTrue(message.HasField('default_sint32'))
test_case.assertTrue(message.HasField('default_sint64'))
test_case.assertTrue(message.HasField('default_fixed32'))
test_case.assertTrue(message.HasField('default_fixed64'))
test_case.assertTrue(message.HasField('default_sfixed32'))
test_case.assertTrue(message.HasField('default_sfixed64'))
test_case.assertTrue(message.HasField('default_float'))
test_case.assertTrue(message.HasField('default_double'))
test_case.assertTrue(message.HasField('default_bool'))
test_case.assertTrue(message.HasField('default_string'))
test_case.assertTrue(message.HasField('default_bytes'))
test_case.assertTrue(message.HasField('default_nested_enum'))
test_case.assertTrue(message.HasField('default_foreign_enum'))
test_case.assertTrue(message.HasField('default_import_enum'))
test_case.assertEqual(401, message.default_int32)
test_case.assertEqual(402, message.default_int64)
test_case.assertEqual(403, message.default_uint32)
test_case.assertEqual(404, message.default_uint64)
test_case.assertEqual(405, message.default_sint32)
test_case.assertEqual(406, message.default_sint64)
test_case.assertEqual(407, message.default_fixed32)
test_case.assertEqual(408, message.default_fixed64)
test_case.assertEqual(409, message.default_sfixed32)
test_case.assertEqual(410, message.default_sfixed64)
test_case.assertEqual(411, message.default_float)
test_case.assertEqual(412, message.default_double)
test_case.assertEqual(False, message.default_bool)
test_case.assertEqual('415', message.default_string)
test_case.assertEqual('416', message.default_bytes)
test_case.assertEqual(unittest_pb2.TestAllTypes.FOO,
message.default_nested_enum)
test_case.assertEqual(unittest_pb2.FOREIGN_FOO,
message.default_foreign_enum)
test_case.assertEqual(unittest_import_pb2.IMPORT_FOO,
message.default_import_enum)
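# Illustrative sketch (not part of the original file; the class and method names
# are hypothetical): the helpers above are typically combined in a round-trip
# test that fills a message, serializes and re-parses it, and verifies that
# every field survived.
import unittest

class _ExampleRoundTripTest(unittest.TestCase):

  def testAllFieldsSurviveSerialization(self):
    message = unittest_pb2.TestAllTypes()
    SetAllFields(message)
    parsed = unittest_pb2.TestAllTypes()
    parsed.ParseFromString(message.SerializeToString())
    ExpectAllFieldsSet(self, parsed)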
def GoldenFile(filename):
"""Finds the given golden file and returns a file object representing it."""
# Search up the directory tree looking for the C++ protobuf source code.
path = '.'
while os.path.exists(path):
if os.path.exists(os.path.join(path, 'src/google/protobuf')):
# Found it. Load the golden file from the testdata directory.
full_path = os.path.join(path, 'src/google/protobuf/testdata', filename)
return open(full_path, 'rb')
path = os.path.join(path, '..')
raise RuntimeError(
'Could not find golden files. This test must be run from within the '
'protobuf source package so that it can read test data files from the '
'C++ source tree.')
def SetAllPackedFields(message):
"""Sets every field in the message to a unique value.
Args:
message: A unittest_pb2.TestPackedTypes instance.
"""
message.packed_int32.extend([601, 701])
message.packed_int64.extend([602, 702])
message.packed_uint32.extend([603, 703])
message.packed_uint64.extend([604, 704])
message.packed_sint32.extend([605, 705])
message.packed_sint64.extend([606, 706])
message.packed_fixed32.extend([607, 707])
message.packed_fixed64.extend([608, 708])
message.packed_sfixed32.extend([609, 709])
message.packed_sfixed64.extend([610, 710])
message.packed_float.extend([611.0, 711.0])
message.packed_double.extend([612.0, 712.0])
message.packed_bool.extend([True, False])
message.packed_enum.extend([unittest_pb2.FOREIGN_BAR,
unittest_pb2.FOREIGN_BAZ])
def SetAllPackedExtensions(message):
"""Sets every extension in the message to a unique value.
Args:
message: A unittest_pb2.TestPackedExtensions instance.
"""
extensions = message.Extensions
pb2 = unittest_pb2
extensions[pb2.packed_int32_extension].extend([601, 701])
extensions[pb2.packed_int64_extension].extend([602, 702])
extensions[pb2.packed_uint32_extension].extend([603, 703])
extensions[pb2.packed_uint64_extension].extend([604, 704])
extensions[pb2.packed_sint32_extension].extend([605, 705])
extensions[pb2.packed_sint64_extension].extend([606, 706])
extensions[pb2.packed_fixed32_extension].extend([607, 707])
extensions[pb2.packed_fixed64_extension].extend([608, 708])
extensions[pb2.packed_sfixed32_extension].extend([609, 709])
extensions[pb2.packed_sfixed64_extension].extend([610, 710])
extensions[pb2.packed_float_extension].extend([611.0, 711.0])
extensions[pb2.packed_double_extension].extend([612.0, 712.0])
extensions[pb2.packed_bool_extension].extend([True, False])
extensions[pb2.packed_enum_extension].extend([unittest_pb2.FOREIGN_BAR,
unittest_pb2.FOREIGN_BAZ])
def SetAllUnpackedFields(message):
"""Sets every field in the message to a unique value.
Args:
message: A unittest_pb2.TestUnpackedTypes instance.
"""
message.unpacked_int32.extend([601, 701])
message.unpacked_int64.extend([602, 702])
message.unpacked_uint32.extend([603, 703])
message.unpacked_uint64.extend([604, 704])
message.unpacked_sint32.extend([605, 705])
message.unpacked_sint64.extend([606, 706])
message.unpacked_fixed32.extend([607, 707])
message.unpacked_fixed64.extend([608, 708])
message.unpacked_sfixed32.extend([609, 709])
message.unpacked_sfixed64.extend([610, 710])
message.unpacked_float.extend([611.0, 711.0])
message.unpacked_double.extend([612.0, 712.0])
message.unpacked_bool.extend([True, False])
message.unpacked_enum.extend([unittest_pb2.FOREIGN_BAR,
unittest_pb2.FOREIGN_BAZ])
|
examples/example.py | RonenTRA/faster-than-requests | 857 | 11073057 | <reponame>RonenTRA/faster-than-requests
import faster_than_requests as requests
requests.init_client()
print(requests.get("http://httpbin.org/get")) # HTTP GET.
print(requests.post("http://httpbin.org/post", """{"foo": "bar", "baz": true}""")) # HTTP POST.
print(requests.put("http://httpbin.org/put", """{"foo": "bar", "baz": true}""")) # HTTP PUT.
print(requests.delete("http://httpbin.org/delete")) # HTTP DELETE.
print(requests.patch("http://httpbin.org/patch", """{"foo": "bar", "baz": true}""")) # HTTP PATCH.
print(requests.get2str("http://httpbin.org/get")) # HTTP GET body only to string response.
print(requests.get2dict("http://httpbin.org/get")) # HTTP GET body only to dictionary response.
print(requests.get2json("http://httpbin.org/get")) # HTTP GET body only to JSON response.
print(requests.post2str("http://httpbin.org/post", """{"foo": "bar", "baz": true}""")) # HTTP POST data only to string response.
print(requests.post2dict("http://httpbin.org/post", """{"foo": "bar", "baz": true}""")) # HTTP POST data only to dictionary response.
print(requests.post2json("http://httpbin.org/post", """{"foo": "bar", "baz": true}""")) # HTTP POST data to JSON response.
print(requests.download("http://httpbin.org/image/jpeg", "foo.jpeg")) # HTTP GET Download 1 file.
print(requests.get2str2(["http://httpbin.org/json", "http://httpbin.org/xml"])) # HTTP GET body to string from a list.
print(requests.download2([("http://httpbin.org/image/jpeg", "foo.jpg"), # HTTP GET Download a list of files.
("http://httpbin.org/image/svg", "bar.svg")]))
requests.set_headers([("key", "value")]) # Set HTTP Headers example.
requests.debugConfig() # Debug the internal Configuration.
print(requests.tuples2json([("key0", "value0"), ("key1", "value1")]))
|
learn2learn-master/learn2learn/nn/misc.py | hikmatkhan/Higher | 1,774 | 11073063 | <filename>learn2learn-master/learn2learn/nn/misc.py
#!/usr/bin/env python3
import torch
class Lambda(torch.nn.Module):
"""
[[Source]](https://github.com/learnables/learn2learn/blob/master/learn2learn/nn/misc.py)
**Description**
Utility class to create a wrapper based on a lambda function.
**Arguments**
* **lmb** (callable) - The function to call in the forward pass.
**Example**
~~~python
mean23 = Lambda(lambda x: x.mean(dim=[2, 3])) # mean23 is a Module
x = features(img)
x = mean23(x)
x = x.flatten()
~~~
"""
def __init__(self, lmb):
super(Lambda, self).__init__()
self.lmb = lmb
def forward(self, *args, **kwargs):
return self.lmb(*args, **kwargs)
class Flatten(torch.nn.Module):
"""
[[Source]](https://github.com/learnables/learn2learn/blob/master/learn2learn/nn/misc.py)
**Description**
Utility Module to flatten inputs to `(batch_size, -1)` shape.
**Example**
~~~python
flatten = Flatten()
x = torch.randn(5, 3, 32, 32)
x = flatten(x)
print(x.shape) # (5, 3072)
~~~
"""
def forward(self, x):
return x.view(x.size(0), -1)
class Scale(torch.nn.Module):
"""
[[Source]](https://github.com/learnables/learn2learn/blob/master/learn2learn/nn/misc.py)
**Description**
A per-parameter scaling factor with learnable parameter.
**Arguments**
* **shape** (int or tuple) - The shape of the scaling matrix.
* **alpha** (float, *optional*, default=1.0) - Initial value for the
scaling factor.
**Example**
~~~python
x = torch.ones(3)
scale = Scale(x.shape, alpha=0.5)
print(scale(x)) # [.5, .5, .5]
~~~
"""
def __init__(self, shape, alpha=1.0):
super(Scale, self).__init__()
if isinstance(shape, int):
shape = (shape, )
alpha = torch.ones(*shape) * alpha
self.alpha = torch.nn.Parameter(alpha)
def forward(self, x):
return x * self.alpha
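# Illustrative usage sketch (not part of the original module; the model below is
# only an example): Lambda, Flatten and Scale compose naturally in a Sequential.
if __name__ == "__main__":
    model = torch.nn.Sequential(
        Lambda(lambda t: t.relu()),     # arbitrary elementwise op via Lambda
        Flatten(),                      # (5, 3, 32, 32) -> (5, 3072)
        Scale(3 * 32 * 32, alpha=0.5),  # learnable per-feature scaling
    )
    out = model(torch.randn(5, 3, 32, 32))
    print(out.shape)  # torch.Size([5, 3072])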
|
tests/python/relay/test_pass_partial_eval.py | shengxinhu/tvm | 4,640 | 11073123 | <reponame>shengxinhu/tvm<filename>tests/python/relay/test_pass_partial_eval.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.relay.prelude import Prelude
from tvm.relay import op, create_executor, transform
from tvm.relay import Var, TypeVar, TupleGetItem, Let, Function, const, RefRead, RefWrite, RefCreate
from tvm.relay import TensorType, Tuple, If, Clause, PatternConstructor, PatternVar, Match
from tvm.relay import GlobalVar, Call
from tvm.relay.transform import gradient
from tvm.relay.testing import make_nat_expr, run_infer_type
def check_eval(expr, expected_result, mod=None, rtol=1e-07):
dev = tvm.device("llvm", 0)
result = create_executor(mod=mod, device=dev, target="llvm").evaluate(expr)
np.testing.assert_allclose(result.numpy(), expected_result, rtol=rtol)
def run_opt_pass(expr, passes):
passes = passes if isinstance(passes, list) else [passes]
mod = tvm.IRModule.from_expr(expr)
seq = tvm.transform.Sequential(passes)
with tvm.transform.PassContext(opt_level=3):
mod = seq(mod)
entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
def tipe(expr):
return run_opt_pass(expr, [transform.PartialEvaluate(), transform.InferType()])
def dcpe(expr, mod=None, grad=False, ignore_impurity=False):
passes = [
transform.PartialEvaluate(),
transform.InferType(),
transform.DeadCodeElimination(inline_once=True, ignore_impurity=ignore_impurity),
transform.InferType(),
]
if grad:
expr = gradient(run_infer_type(expr))
if mod:
assert isinstance(expr, Function)
mod["main"] = expr
seq = tvm.transform.Sequential(passes)
mod = seq(mod)
return mod["main"]
return run_opt_pass(expr, passes)
def test_tuple():
t = TypeVar("t")
x = Var("x", t)
body = TupleGetItem(relay.Tuple([relay.const(4.0), x]), 1)
f = Function([x], body, None, [t])
expected = relay.Function([x], x, None, [t])
expected = run_opt_pass(expected, transform.InferType())
assert tvm.ir.structural_equal(dcpe(f), expected)
def test_const_inline():
t = relay.TensorType([], "float32")
d = Var("d", t)
double = Function([d], d + d)
orig = double(const(4.0))
assert tvm.ir.structural_equal(dcpe(orig), const(8.0))
def test_ref():
t = relay.TensorType([], "float32")
d = relay.Var("d", t)
r = relay.Var("r", relay.RefType(t))
x = relay.Var("x")
body = relay.RefRead(r)
body = Let(x, RefWrite(r, RefRead(r) * RefRead(r)), body)
body = Let(r, RefCreate(d), body)
square = Function([d], body)
expected = run_opt_pass(Function([d], d * d), transform.InferType())
# TODO(mbs): Revisit once DCE eliminates dead writes.
actual = dcpe(square, ignore_impurity=True)
assert tvm.ir.structural_equal(actual, expected)
def test_empty_ad():
shape = (10, 10)
dtype = "float32"
t = TensorType(shape, dtype)
d = Var("d", t)
f = Function([d], d)
# TODO(mbs): Revisit once DCE eliminates dead writes.
g = dcpe(f, grad=True, ignore_impurity=True)
expected = Function([d], Tuple([d, Tuple([op.ones_like(d)])]))
expected = run_opt_pass(expected, transform.InferType())
assert tvm.ir.structural_equal(g, expected)
def test_ad():
shape = (10, 10)
dtype = "float32"
t = TensorType(shape, dtype)
d = Var("d", t)
f = Function([d], d * d)
# TODO(mbs): Revisit once DCE eliminates dead writes.
g = dcpe(f, grad=True, ignore_impurity=True)
m = d * d
x = relay.Var("x")
o = op.ones_like(x)
x1 = relay.Var("x1")
grad = op.zeros_like(d) + op.collapse_sum_like(x1 * d, d) + op.collapse_sum_like(x1 * d, d)
body = Tuple([x, Tuple([grad])])
body = relay.Let(x1, o, body)
expected = Function([d], relay.Let(x, m, body))
expected = run_opt_pass(expected, transform.InferType())
tvm.ir.assert_structural_equal(g, expected)
def test_if_ref():
shape = ()
dtype = "bool"
t = TensorType(shape, dtype)
d = Var("d", t)
r = Var("r")
update = Function([], RefWrite(r, RefRead(r) + RefRead(r)))
u = Var("u")
body = If(d, u(), u())
eff = Var("eff")
body = Let(eff, body, RefRead(r))
f = Function([d], Let(r, RefCreate(const(1)), Let(u, update, body)))
pe_f = tipe(f)
f_res = create_executor().evaluate(f)(const(True))
pe_f_res = create_executor().evaluate(pe_f)(const(True))
np.testing.assert_allclose(f_res.numpy(), 2 * np.ones_like(f_res.numpy()))
np.testing.assert_allclose(pe_f_res.numpy(), 2 * np.ones_like(pe_f_res.numpy()))
def test_function_invalidate():
shape = ()
dtype = "bool"
t = TensorType(shape, dtype)
d = Var("d", t)
r = Var("r")
fetch = Function([], RefRead(r))
fet = Var("fetch")
fet_obscured = Var("fetch_obscured")
u = Var("u")
body = If(d, fet_obscured(), fet_obscured())
body = Let(u, RefWrite(r, const(1)), body)
body = Let(fet_obscured, If(d, fet, fet), body)
body = Let(fet, fetch, body)
body = Let(r, RefCreate(const(0)), body)
f = Function([d], body)
pe_f = tipe(f)
f_res = create_executor().evaluate(f)(const(True))
pe_f_res = create_executor().evaluate(pe_f)(const(True))
np.testing.assert_allclose(f_res.numpy(), np.ones_like(f_res.numpy()))
np.testing.assert_allclose(pe_f_res.numpy(), np.ones_like(pe_f_res.numpy()))
def test_head_cons():
mod = tvm.IRModule()
p = Prelude(mod)
t = TypeVar("t")
x = Var("x", t)
rlist, cons, nil = p.mod.get_type("List")
hd = p.mod.get_global_var("hd")
body = hd(cons(x, nil()))
f = Function([x], body, None, [t])
res = dcpe(f, mod)
expected_mod = tvm.IRModule.from_expr(Function([x], x, t, [t]))
assert tvm.ir.structural_equal(res, expected_mod["main"])
def test_map():
mod = tvm.IRModule()
p = Prelude(mod)
rlist, cons, nil = p.mod.get_type("List")
rmap = p.mod.get_global_var("map")
f = GlobalVar("f")
t = TypeVar("t")
a = Var("a", t)
mod[f] = Function([a], a, t, [t])
orig = rmap(f, cons(const(1), cons(const(2), cons(const(3), nil()))))
expected = cons((const(1)), cons((const(2)), cons((const(3)), nil())))
expected = Function([], expected)
mod["main"] = expected
mod = transform.InferType()(mod)
expected = mod["main"]
orig = Function([], orig)
res = dcpe(orig, mod=mod)
assert tvm.ir.structural_equal(res.body, expected.body)
def test_loop():
mod = tvm.IRModule()
t = TypeVar("t")
x = Var("x", t)
loop = GlobalVar("loop")
mod[loop] = Function([x], loop(x), t, [t])
expected = Call(loop, [const(1)])
mod["main"] = Function([], expected)
mod = transform.InferType()(mod)
expected = mod["main"].body
call = Function([], loop(const(1)))
res = dcpe(call, mod=mod)
assert tvm.ir.structural_equal(res.body, expected)
def test_swap_loop():
mod = tvm.IRModule()
p = Prelude(mod)
p.mod.import_from_std("nat.rly")
nat, _, _ = p.mod.get_type("nat")
x = Var("x", nat())
y = Var("y", nat())
loop = GlobalVar("loop")
mod[loop] = Function([x, y], loop(y, x), nat())
prog = loop(make_nat_expr(p, 1), make_nat_expr(p, 2))
res = Function([], prog)
res = dcpe(res, mod=mod)
assert tvm.ir.structural_equal(prog, res.body)
def test_abs_diff():
# TODO(@M.K.): refactor using tuple pattern (not yet implemented)
mod = tvm.IRModule()
p = Prelude(mod)
p.mod.import_from_std("nat.rly")
nat, z, s = p.mod.get_type("nat")
x = Var("x", nat())
y = Var("y", nat())
xp = Var("x'", nat())
yp = Var("y'", nat())
diff = GlobalVar("diff")
y_z_case = Clause(PatternConstructor(z, []), x)
y_s_case = Clause(PatternConstructor(s, [PatternVar(yp)]), diff(yp, xp))
x_z_case = Clause(PatternConstructor(z, []), y)
x_s_case = Clause(PatternConstructor(s, [PatternVar(xp)]), Match(y, [y_z_case, y_s_case]))
mod[diff] = Function([x, y], Match(x, [x_z_case, x_s_case]))
orig = diff(make_nat_expr(p, 7), make_nat_expr(p, 3))
orig = Function([], orig)
res = dcpe(orig, mod=mod)
assert tvm.ir.structural_equal(res.body, make_nat_expr(p, 4))
def test_match_nat_id():
mod = tvm.IRModule()
p = Prelude(mod)
p.mod.import_from_std("nat.rly")
nat, z, s = p.mod.get_type("nat")
x = Var("x", nat())
y = Var("y", nat())
nat_id = GlobalVar("nat_id")
z_case = Clause(PatternConstructor(z, []), z())
s_case = Clause(PatternConstructor(s, [PatternVar(y)]), s(y))
mod[nat_id] = Function([x], Match(x, [z_case, s_case]))
orig = nat_id(make_nat_expr(p, 3))
orig = Function([], orig)
res = dcpe(orig, mod=mod)
assert tvm.ir.structural_equal(res.body, make_nat_expr(p, 3))
def test_nat_id():
mod = tvm.IRModule()
p = Prelude(mod)
p.mod.import_from_std("nat.rly")
nat, _, _ = p.mod.get_type("nat")
x = Var("x", nat())
y = Var("y", nat())
nat_id = GlobalVar("nat_id")
mod[nat_id] = Function([x], x)
orig = nat_id(make_nat_expr(p, 3))
orig = Function([], orig)
res = dcpe(orig, mod=mod)
assert tvm.ir.structural_equal(res.body, make_nat_expr(p, 3))
def test_global_match_nat_id():
mod = tvm.IRModule()
p = Prelude(mod)
p.mod.import_from_std("nat.rly")
nat, z, s = p.mod.get_type("nat")
x = Var("x", nat())
z_case = Clause(PatternConstructor(z, []), z())
s_case = Clause(PatternConstructor(s, [PatternVar(x)]), s(x))
orig = Match(make_nat_expr(p, 3), [z_case, s_case])
orig = Function([], orig)
res = dcpe(orig, mod=mod)
assert tvm.ir.structural_equal(res.body, make_nat_expr(p, 3))
def test_double():
mod = tvm.IRModule()
p = Prelude(mod)
p.mod.import_from_std("nat.rly")
double = p.mod.get_global_var("nat_double")
orig = double(make_nat_expr(p, 3))
orig = Function([], orig)
res = dcpe(orig, mod=mod)
assert tvm.ir.structural_equal(res.body, make_nat_expr(p, 6))
def test_concat():
t = relay.TensorType([10], "float32")
x = Var("x", t)
y = Var("x", t)
orig = run_infer_type(Function([x, y], op.concatenate([x, y], axis=0)))
tvm.ir.assert_structural_equal(dcpe(orig), orig)
def test_triangle_number():
t = relay.TensorType([], "int32")
x = Var("x", t)
f_var = Var("f")
f = Function([x], If(op.equal(x, const(0)), const(0), x + f_var(x - const(1))))
orig = run_infer_type(Let(f_var, f, f_var(const(10))))
tvm.ir.assert_structural_equal(dcpe(orig), const(55))
def test_nat_update():
m = tvm.IRModule()
p = Prelude(m)
p.mod.import_from_std("nat.rly")
m = transform.ToANormalForm()(m)
transform.PartialEvaluate()(m)
def test_tuple_match():
a = relay.Var("a")
b = relay.Var("b")
clause = relay.Clause(relay.PatternTuple([relay.PatternVar(a), relay.PatternVar(b)]), a + b)
x = relay.Match(relay.Tuple([relay.const(1), relay.const(1)]), [clause])
tvm.ir.assert_structural_equal(dcpe(x), const(2))
if __name__ == "__main__":
tvm.testing.main()
|
tests/components/mqtt/test_light_template.py | learn-home-automation/core | 22,481 | 11073137 | """The tests for the MQTT Template light platform.
Configuration example with all features:
light:
platform: mqtt_template
name: mqtt_template_light_1
state_topic: 'home/rgb1'
command_topic: 'home/rgb1/set'
command_on_template: >
on,{{ brightness|d }},{{ red|d }}-{{ green|d }}-{{ blue|d }}
command_off_template: 'off'
state_template: '{{ value.split(",")[0] }}'
brightness_template: '{{ value.split(",")[1] }}'
color_temp_template: '{{ value.split(",")[2] }}'
white_value_template: '{{ value.split(",")[3] }}'
red_template: '{{ value.split(",")[4].split("-")[0] }}'
green_template: '{{ value.split(",")[4].split("-")[1] }}'
blue_template: '{{ value.split(",")[4].split("-")[2] }}'
If your light doesn't support brightness feature, omit `brightness_template`.
If your light doesn't support color temp feature, omit `color_temp_template`.
If your light doesn't support white value feature, omit `white_value_template`.
If your light doesn't support RGB feature, omit `(red|green|blue)_template`.
"""
from unittest.mock import patch
import pytest
from homeassistant.components import light
from homeassistant.components.mqtt.light.schema_basic import (
MQTT_LIGHT_ATTRIBUTES_BLOCKED,
)
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_SUPPORTED_FEATURES,
STATE_OFF,
STATE_ON,
)
import homeassistant.core as ha
from homeassistant.setup import async_setup_component
from .test_common import (
help_test_availability_when_connection_lost,
help_test_availability_without_topic,
help_test_custom_availability_payload,
help_test_default_availability_payload,
help_test_discovery_broken,
help_test_discovery_removal,
help_test_discovery_update,
help_test_discovery_update_attr,
help_test_discovery_update_unchanged,
help_test_entity_debug_info_message,
help_test_entity_device_info_remove,
help_test_entity_device_info_update,
help_test_entity_device_info_with_connection,
help_test_entity_device_info_with_identifier,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
help_test_setting_attribute_via_mqtt_json_message,
help_test_setting_attribute_with_template,
help_test_setting_blocked_attribute_via_mqtt_json_message,
help_test_unique_id,
help_test_update_with_json_attrs_bad_JSON,
help_test_update_with_json_attrs_not_dict,
)
from tests.common import assert_setup_component, async_fire_mqtt_message
from tests.components.light import common
DEFAULT_CONFIG = {
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"command_topic": "test-topic",
"command_on_template": "on,{{ transition }}",
"command_off_template": "off,{{ transition|d }}",
}
}
async def test_setup_fails(hass, mqtt_mock):
"""Test that setup fails with missing required configuration items."""
with assert_setup_component(0, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{light.DOMAIN: {"platform": "mqtt", "schema": "template", "name": "test"}},
)
await hass.async_block_till_done()
assert hass.states.get("light.test") is None
with assert_setup_component(0, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"command_topic": "test_topic",
}
},
)
await hass.async_block_till_done()
assert hass.states.get("light.test") is None
with assert_setup_component(0, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"command_topic": "test_topic",
"command_on_template": "on",
}
},
)
await hass.async_block_till_done()
assert hass.states.get("light.test") is None
with assert_setup_component(0, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"command_topic": "test_topic",
"command_off_template": "off",
}
},
)
await hass.async_block_till_done()
assert hass.states.get("light.test") is None
async def test_rgb_light(hass, mqtt_mock):
"""Test RGB light flags brightness support."""
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"command_topic": "test_light_rgb/set",
"command_on_template": "on",
"command_off_template": "off",
"red_template": '{{ value.split(",")[4].' 'split("-")[0] }}',
"green_template": '{{ value.split(",")[4].' 'split("-")[1] }}',
"blue_template": '{{ value.split(",")[4].' 'split("-")[2] }}',
}
},
)
await hass.async_block_till_done()
state = hass.states.get("light.test")
expected_features = (
light.SUPPORT_TRANSITION
| light.SUPPORT_COLOR
| light.SUPPORT_FLASH
| light.SUPPORT_BRIGHTNESS
)
assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == expected_features
async def test_state_change_via_topic(hass, mqtt_mock):
"""Test state change via topic."""
with assert_setup_component(1, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"state_topic": "test_light_rgb",
"command_topic": "test_light_rgb/set",
"command_on_template": "on,"
"{{ brightness|d }},"
"{{ color_temp|d }},"
"{{ white_value|d }},"
"{{ red|d }}-"
"{{ green|d }}-"
"{{ blue|d }}",
"command_off_template": "off",
"state_template": '{{ value.split(",")[0] }}',
}
},
)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get("rgb_color") is None
assert state.attributes.get("brightness") is None
assert state.attributes.get("color_temp") is None
assert state.attributes.get("white_value") is None
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "test_light_rgb", "on")
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("rgb_color") is None
assert state.attributes.get("brightness") is None
assert state.attributes.get("color_temp") is None
assert state.attributes.get("white_value") is None
async def test_state_brightness_color_effect_temp_white_change_via_topic(
hass, mqtt_mock
):
"""Test state, bri, color, effect, color temp, white val change."""
with assert_setup_component(1, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"effect_list": ["rainbow", "colorloop"],
"state_topic": "test_light_rgb",
"command_topic": "test_light_rgb/set",
"command_on_template": "on,"
"{{ brightness|d }},"
"{{ color_temp|d }},"
"{{ white_value|d }},"
"{{ red|d }}-"
"{{ green|d }}-"
"{{ blue|d }},"
"{{ effect|d }}",
"command_off_template": "off",
"state_template": '{{ value.split(",")[0] }}',
"brightness_template": '{{ value.split(",")[1] }}',
"color_temp_template": '{{ value.split(",")[2] }}',
"white_value_template": '{{ value.split(",")[3] }}',
"red_template": '{{ value.split(",")[4].' 'split("-")[0] }}',
"green_template": '{{ value.split(",")[4].' 'split("-")[1] }}',
"blue_template": '{{ value.split(",")[4].' 'split("-")[2] }}',
"effect_template": '{{ value.split(",")[5] }}',
}
},
)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get("rgb_color") is None
assert state.attributes.get("brightness") is None
assert state.attributes.get("effect") is None
assert state.attributes.get("color_temp") is None
assert state.attributes.get("white_value") is None
assert not state.attributes.get(ATTR_ASSUMED_STATE)
# turn on the light, full white
async_fire_mqtt_message(hass, "test_light_rgb", "on,255,145,123,255-128-64,")
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("rgb_color") == (255, 128, 63)
assert state.attributes.get("brightness") == 255
assert state.attributes.get("color_temp") == 145
assert state.attributes.get("white_value") == 123
assert state.attributes.get("effect") is None
# turn the light off
async_fire_mqtt_message(hass, "test_light_rgb", "off")
state = hass.states.get("light.test")
assert state.state == STATE_OFF
# lower the brightness
async_fire_mqtt_message(hass, "test_light_rgb", "on,100")
light_state = hass.states.get("light.test")
assert light_state.attributes["brightness"] == 100
# change the color temp
async_fire_mqtt_message(hass, "test_light_rgb", "on,,195")
light_state = hass.states.get("light.test")
assert light_state.attributes["color_temp"] == 195
# change the color
async_fire_mqtt_message(hass, "test_light_rgb", "on,,,,41-42-43")
light_state = hass.states.get("light.test")
assert light_state.attributes.get("rgb_color") == (243, 249, 255)
# change the white value
async_fire_mqtt_message(hass, "test_light_rgb", "on,,,134")
light_state = hass.states.get("light.test")
assert light_state.attributes["white_value"] == 134
# change the effect
async_fire_mqtt_message(hass, "test_light_rgb", "on,,,,41-42-43,rainbow")
light_state = hass.states.get("light.test")
assert light_state.attributes.get("effect") == "rainbow"
async def test_sending_mqtt_commands_and_optimistic(hass, mqtt_mock):
"""Test the sending of command in optimistic mode."""
fake_state = ha.State(
"light.test",
"on",
{
"brightness": 95,
"hs_color": [100, 100],
"effect": "random",
"color_temp": 100,
"white_value": 50,
},
)
with patch(
"homeassistant.helpers.restore_state.RestoreEntity.async_get_last_state",
return_value=fake_state,
), assert_setup_component(1, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"command_topic": "test_light_rgb/set",
"command_on_template": "on,"
"{{ brightness|d }},"
"{{ color_temp|d }},"
"{{ white_value|d }},"
"{{ red|d }}-"
"{{ green|d }}-"
"{{ blue|d }}",
"command_off_template": "off",
"effect_list": ["colorloop", "random"],
"optimistic": True,
"state_template": '{{ value.split(",")[0] }}',
"color_temp_template": '{{ value.split(",")[2] }}',
"white_value_template": '{{ value.split(",")[3] }}',
"red_template": '{{ value.split(",")[4].' 'split("-")[0] }}',
"green_template": '{{ value.split(",")[4].' 'split("-")[1] }}',
"blue_template": '{{ value.split(",")[4].' 'split("-")[2] }}',
"effect_template": '{{ value.split(",")[5] }}',
"qos": 2,
}
},
)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("hs_color") == (100, 100)
assert state.attributes.get("effect") == "random"
assert state.attributes.get("color_temp") == 100
assert state.attributes.get("white_value") == 50
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_turn_off(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "off", 2, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
await common.async_turn_on(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,,,,--", 2, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_ON
# Set color_temp
await common.async_turn_on(hass, "light.test", color_temp=70)
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,,70,,--", 2, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("color_temp") == 70
# Set full brightness
await common.async_turn_on(hass, "light.test", brightness=255)
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,255,,,--", 2, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_ON
# Full brightness - no scaling of RGB values sent over MQTT
await common.async_turn_on(
hass, "light.test", rgb_color=[255, 128, 0], white_value=80
)
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,,,80,255-128-0", 2, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("white_value") == 80
assert state.attributes.get("rgb_color") == (255, 128, 0)
# Full brightness - normalization of RGB values sent over MQTT
await common.async_turn_on(hass, "light.test", rgb_color=[128, 64, 0])
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,,,,255-127-0", 2, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("rgb_color") == (255, 127, 0)
# Set half brightness
await common.async_turn_on(hass, "light.test", brightness=128)
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,128,,,--", 2, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_ON
# Half brightness - scaling of RGB values sent over MQTT
await common.async_turn_on(
hass, "light.test", rgb_color=[0, 255, 128], white_value=40
)
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,,,40,0-128-64", 2, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("white_value") == 40
assert state.attributes.get("rgb_color") == (0, 255, 128)
# Half brightness - normalization+scaling of RGB values sent over MQTT
await common.async_turn_on(
hass, "light.test", rgb_color=[0, 32, 16], white_value=40
)
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,,,40,0-128-64", 2, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("white_value") == 40
assert state.attributes.get("rgb_color") == (0, 255, 127)
async def test_sending_mqtt_commands_non_optimistic_brightness_template(
hass, mqtt_mock
):
"""Test the sending of command in optimistic mode."""
with assert_setup_component(1, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"effect_list": ["rainbow", "colorloop"],
"state_topic": "test_light_rgb",
"command_topic": "test_light_rgb/set",
"command_on_template": "on,"
"{{ brightness|d }},"
"{{ color_temp|d }},"
"{{ white_value|d }},"
"{{ red|d }}-"
"{{ green|d }}-"
"{{ blue|d }}",
"command_off_template": "off",
"state_template": '{{ value.split(",")[0] }}',
"brightness_template": '{{ value.split(",")[1] }}',
"color_temp_template": '{{ value.split(",")[2] }}',
"white_value_template": '{{ value.split(",")[3] }}',
"red_template": '{{ value.split(",")[4].' 'split("-")[0] }}',
"green_template": '{{ value.split(",")[4].' 'split("-")[1] }}',
"blue_template": '{{ value.split(",")[4].' 'split("-")[2] }}',
"effect_template": '{{ value.split(",")[5] }}',
}
},
)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert not state.attributes.get("brightness")
assert not state.attributes.get("hs_color")
assert not state.attributes.get("effect")
assert not state.attributes.get("color_temp")
assert not state.attributes.get("white_value")
assert not state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_turn_off(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "off", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
await common.async_turn_on(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,,,,--", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
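    # a state_topic is configured, so the light is non-optimistic: the command is published
    # but the reported state stays OFF until an update arrives on the state topic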
# Set color_temp
await common.async_turn_on(hass, "light.test", color_temp=70)
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,,70,,--", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert not state.attributes.get("color_temp")
# Set full brightness
await common.async_turn_on(hass, "light.test", brightness=255)
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,255,,,--", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert not state.attributes.get("brightness")
# Full brightness - no scaling of RGB values sent over MQTT
await common.async_turn_on(
hass, "light.test", rgb_color=[255, 128, 0], white_value=80
)
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,,,80,255-128-0", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert not state.attributes.get("white_value")
assert not state.attributes.get("rgb_color")
# Full brightness - normalization of RGB values sent over MQTT
await common.async_turn_on(hass, "light.test", rgb_color=[128, 64, 0])
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,,,,255-127-0", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Set half brightness
await common.async_turn_on(hass, "light.test", brightness=128)
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,128,,,--", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Half brightness - no scaling of RGB values sent over MQTT
await common.async_turn_on(
hass, "light.test", rgb_color=[0, 255, 128], white_value=40
)
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,,,40,0-255-128", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
# Half brightness - normalization but no scaling of RGB values sent over MQTT
await common.async_turn_on(
hass, "light.test", rgb_color=[0, 32, 16], white_value=40
)
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,,,40,0-255-127", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
async def test_effect(hass, mqtt_mock):
"""Test effect sent over MQTT in optimistic mode."""
with assert_setup_component(1, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"effect_list": ["rainbow", "colorloop"],
"name": "test",
"command_topic": "test_light_rgb/set",
"command_on_template": "on,{{ effect }}",
"command_off_template": "off",
"qos": 0,
}
},
)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
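    # 44 = EFFECT (4) + FLASH (8) + TRANSITION (32)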
assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == 44
await common.async_turn_on(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert not state.attributes.get("effect")
await common.async_turn_on(hass, "light.test", effect="rainbow")
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,rainbow", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("effect") == "rainbow"
await common.async_turn_on(hass, "light.test", effect="colorloop")
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,colorloop", 0, False
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("effect") == "colorloop"
async def test_flash(hass, mqtt_mock):
"""Test flash sent over MQTT in optimistic mode."""
with assert_setup_component(1, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"command_topic": "test_light_rgb/set",
"command_on_template": "on,{{ flash }}",
"command_off_template": "off",
"qos": 0,
}
},
)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
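    # 40 = FLASH (8) + TRANSITION (32)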
assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == 40
await common.async_turn_on(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_ON
await common.async_turn_on(hass, "light.test", flash="short")
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,short", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_ON
await common.async_turn_on(hass, "light.test", flash="long")
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,long", 0, False
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
async def test_transition(hass, mqtt_mock):
"""Test for transition time being sent when included."""
with assert_setup_component(1, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"command_topic": "test_light_rgb/set",
"command_on_template": "on,{{ transition }}",
"command_off_template": "off,{{ transition|int|d }}",
"qos": 1,
}
},
)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == 40
await common.async_turn_on(hass, "light.test", transition=10.0)
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,10.0", 1, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_ON
await common.async_turn_off(hass, "light.test", transition=20.0)
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "off,20", 1, False
)
state = hass.states.get("light.test")
assert state.state == STATE_OFF
async def test_invalid_values(hass, mqtt_mock):
"""Test that invalid values are ignored."""
with assert_setup_component(1, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"effect_list": ["rainbow", "colorloop"],
"state_topic": "test_light_rgb",
"command_topic": "test_light_rgb/set",
"command_on_template": "on,"
"{{ brightness|d }},"
"{{ color_temp|d }},"
"{{ red|d }}-"
"{{ green|d }}-"
"{{ blue|d }},"
"{{ effect|d }}",
"command_off_template": "off",
"state_template": '{{ value.split(",")[0] }}',
"brightness_template": '{{ value.split(",")[1] }}',
"color_temp_template": '{{ value.split(",")[2] }}',
"white_value_template": '{{ value.split(",")[3] }}',
"red_template": '{{ value.split(",")[4].' 'split("-")[0] }}',
"green_template": '{{ value.split(",")[4].' 'split("-")[1] }}',
"blue_template": '{{ value.split(",")[4].' 'split("-")[2] }}',
"effect_template": '{{ value.split(",")[5] }}',
}
},
)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get("rgb_color") is None
assert state.attributes.get("brightness") is None
assert state.attributes.get("color_temp") is None
assert state.attributes.get("effect") is None
assert state.attributes.get("white_value") is None
assert not state.attributes.get(ATTR_ASSUMED_STATE)
# turn on the light, full white
async_fire_mqtt_message(
hass, "test_light_rgb", "on,255,215,222,255-255-255,rainbow"
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("brightness") == 255
assert state.attributes.get("color_temp") == 215
assert state.attributes.get("rgb_color") == (255, 255, 255)
assert state.attributes.get("white_value") == 222
assert state.attributes.get("effect") == "rainbow"
# bad state value
async_fire_mqtt_message(hass, "test_light_rgb", "offf")
# state should not have changed
state = hass.states.get("light.test")
assert state.state == STATE_ON
# bad brightness values
async_fire_mqtt_message(hass, "test_light_rgb", "on,off,255-255-255")
# brightness should not have changed
state = hass.states.get("light.test")
assert state.attributes.get("brightness") == 255
# bad color temp values
async_fire_mqtt_message(hass, "test_light_rgb", "on,,off,255-255-255")
# color temp should not have changed
state = hass.states.get("light.test")
assert state.attributes.get("color_temp") == 215
# bad color values
async_fire_mqtt_message(hass, "test_light_rgb", "on,255,a-b-c")
# color should not have changed
state = hass.states.get("light.test")
assert state.attributes.get("rgb_color") == (255, 255, 255)
# bad white value values
async_fire_mqtt_message(hass, "test_light_rgb", "on,,,off,255-255-255")
# white value should not have changed
state = hass.states.get("light.test")
assert state.attributes.get("white_value") == 222
# bad effect value
async_fire_mqtt_message(hass, "test_light_rgb", "on,255,a-b-c,white")
# effect should not have changed
state = hass.states.get("light.test")
assert state.attributes.get("effect") == "rainbow"
async def test_availability_when_connection_lost(hass, mqtt_mock):
"""Test availability after MQTT disconnection."""
await help_test_availability_when_connection_lost(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_availability_without_topic(hass, mqtt_mock):
"""Test availability without defined availability topic."""
await help_test_availability_without_topic(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_default_availability_payload(hass, mqtt_mock):
"""Test availability by default payload with defined topic."""
await help_test_default_availability_payload(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_custom_availability_payload(hass, mqtt_mock):
"""Test availability by custom payload with defined topic."""
await help_test_custom_availability_payload(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_via_mqtt_json_message(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_setting_blocked_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_blocked_attribute_via_mqtt_json_message(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG, MQTT_LIGHT_ATTRIBUTES_BLOCKED
)
async def test_setting_attribute_with_template(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_with_template(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_not_dict(
hass, mqtt_mock, caplog, light.DOMAIN, DEFAULT_CONFIG
)
async def test_update_with_json_attrs_bad_JSON(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_bad_JSON(
hass, mqtt_mock, caplog, light.DOMAIN, DEFAULT_CONFIG
)
async def test_discovery_update_attr(hass, mqtt_mock, caplog):
"""Test update of discovered MQTTAttributes."""
await help_test_discovery_update_attr(
hass, mqtt_mock, caplog, light.DOMAIN, DEFAULT_CONFIG
)
async def test_unique_id(hass, mqtt_mock):
"""Test unique id option only creates one light per unique_id."""
config = {
light.DOMAIN: [
{
"platform": "mqtt",
"name": "<NAME>",
"schema": "template",
"state_topic": "test-topic",
"command_topic": "test_topic",
"command_on_template": "on,{{ transition }}",
"command_off_template": "off,{{ transition|d }}",
"unique_id": "TOTALLY_UNIQUE",
},
{
"platform": "mqtt",
"name": "Test 2",
"schema": "template",
"state_topic": "test-topic",
"command_topic": "test_topic",
"unique_id": "TOTALLY_UNIQUE",
},
]
}
await help_test_unique_id(hass, mqtt_mock, light.DOMAIN, config)
async def test_discovery_removal(hass, mqtt_mock, caplog):
"""Test removal of discovered mqtt_json lights."""
data = (
'{ "name": "test",'
' "schema": "template",'
' "command_topic": "test_topic",'
' "command_on_template": "on",'
' "command_off_template": "off"}'
)
await help_test_discovery_removal(hass, mqtt_mock, caplog, light.DOMAIN, data)
async def test_discovery_update_light(hass, mqtt_mock, caplog):
"""Test update of discovered light."""
config1 = {
"name": "Beer",
"schema": "template",
"state_topic": "test_topic",
"command_topic": "test_topic",
"command_on_template": "on",
"command_off_template": "off",
}
config2 = {
"name": "Milk",
"schema": "template",
"state_topic": "test_topic",
"command_topic": "test_topic",
"command_on_template": "on",
"command_off_template": "off",
}
await help_test_discovery_update(
hass, mqtt_mock, caplog, light.DOMAIN, config1, config2
)
async def test_discovery_update_unchanged_light(hass, mqtt_mock, caplog):
"""Test update of discovered light."""
data1 = (
'{ "name": "Beer",'
' "schema": "template",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic",'
' "command_on_template": "on",'
' "command_off_template": "off"}'
)
with patch(
"homeassistant.components.mqtt.light.schema_template.MqttLightTemplate.discovery_update"
) as discovery_update:
await help_test_discovery_update_unchanged(
hass, mqtt_mock, caplog, light.DOMAIN, data1, discovery_update
)
@pytest.mark.no_fail_on_log_exception
async def test_discovery_broken(hass, mqtt_mock, caplog):
"""Test handling of bad discovery message."""
data1 = '{ "name": "Beer" }'
data2 = (
'{ "name": "Milk",'
' "schema": "template",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic",'
' "command_on_template": "on",'
' "command_off_template": "off"}'
)
await help_test_discovery_broken(
hass, mqtt_mock, caplog, light.DOMAIN, data1, data2
)
async def test_entity_device_info_with_connection(hass, mqtt_mock):
"""Test MQTT light device registry integration."""
await help_test_entity_device_info_with_connection(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
"""Test MQTT light device registry integration."""
await help_test_entity_device_info_with_identifier(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_update(hass, mqtt_mock):
"""Test device registry update."""
await help_test_entity_device_info_update(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_remove(hass, mqtt_mock):
"""Test device registry remove."""
await help_test_entity_device_info_remove(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_id_update_subscriptions(hass, mqtt_mock):
"""Test MQTT subscriptions are managed when entity_id is updated."""
await help_test_entity_id_update_subscriptions(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_id_update_discovery_update(hass, mqtt_mock):
"""Test MQTT discovery update when entity_id is updated."""
await help_test_entity_id_update_discovery_update(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_debug_info_message(hass, mqtt_mock):
"""Test MQTT debug info."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"command_topic": "test-topic",
"command_on_template": "on,{{ transition }}",
"command_off_template": "off,{{ transition|d }}",
"state_template": '{{ value.split(",")[0] }}',
}
}
await help_test_entity_debug_info_message(hass, mqtt_mock, light.DOMAIN, config)
async def test_max_mireds(hass, mqtt_mock):
"""Test setting min_mireds and max_mireds."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"command_topic": "test_max_mireds/set",
"command_on_template": "on",
"command_off_template": "off",
"color_temp_template": "{{ value }}",
"max_mireds": 370,
}
}
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.attributes.get("min_mireds") == 153
assert state.attributes.get("max_mireds") == 370
|
koku/koku/test_presto_delete_wrapper_trigger.py | cgoodfred/koku | 157 | 11073139 | import uuid
from datetime import datetime
from django.db import connection as conn
from pytz import UTC
from . import database as kdb
from api.iam.test.iam_test_case import IamTestCase
class TestPrestoDeleteLogTrigger(IamTestCase):
def test_presto_delete_log_table_exists(self):
"""
        Ensure that the table and trigger exist
"""
with conn.cursor() as cur:
cur.execute(
"""
select count(*) as ct
from pg_class
where relnamespace = 'acct10001'::regnamespace
and relname = 'presto_delete_wrapper_log';
"""
)
self.assertTrue(bool(cur.fetchone()[0]))
cur.execute(
"""
select count(*) as ct
from pg_trigger
where tgname = 'tr_presto_before_insert'
and tgrelid = 'acct10001.presto_delete_wrapper_log'::regclass;
"""
)
self.assertTrue(bool(cur.fetchone()[0]))
def test_presto_delete_log_func_exists(self):
"""
Ensure that the presto delete wrapper trigger function exists
"""
func_schema = "public"
func_name = "tr_presto_delete_wrapper_log_action"
func_sig = f"{func_schema}.{func_name}()"
self.assertTrue(kdb.dbfunc_exists(conn, func_schema, func_name, func_sig))
def test_delete_log_action(self):
"""
Test that the trigger action will delete the specified records from the specified table
"""
# create test table with 20 rows
with conn.cursor() as cur:
cur.execute(
"""
create table acct10001.test_presto_delete as
select id::int, 'eek'::text "data"
from generate_series(1, 20) id;
"""
)
cur.execute("""select count(*) as ct from acct10001.test_presto_delete;""")
initial_row_count = cur.fetchone()[0]
self.assertEqual(initial_row_count, 20)
        # delete 4 rows (id 77 in the where clause is intentionally out of range)
id = uuid.uuid4()
action_ts = datetime.now().replace(tzinfo=UTC)
target_table = "test_presto_delete"
where_clause = "where id in (1, 4, 7, 19, 77)"
with conn.cursor() as cur:
cur.execute(
"""
insert into acct10001.presto_delete_wrapper_log(id, action_ts, table_name, where_clause)
values(%s, %s, %s, %s);
""",
(id, action_ts, target_table, where_clause),
)
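            # the BEFORE INSERT trigger (tr_presto_before_insert) runs the delete described by
            # table_name/where_clause and records the number of deleted rows in result_rows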
cur.execute("""select result_rows from acct10001.presto_delete_wrapper_log where id = %s;""", (id,))
result_rows = cur.fetchone()[0]
cur.execute("""select count(*) as ct from acct10001.test_presto_delete;""")
row_count = cur.fetchone()[0]
self.assertEqual(result_rows, 4)
self.assertEqual(row_count, (initial_row_count - result_rows))
|
paginator.py | Rongaming7777/PoketwoAuto | 183 | 11073146 | <filename>paginator.py
import asyncio
import discord
class Paginator:
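    # Reaction-driven paginator: 👈/👉 move the cursor through self.embeds and ❌ deletes the
    # message; both reaction_add and reaction_remove are handled, so toggling the same arrow
    # on and off keeps turning pages.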
def __init__(self, message, base, embeds, obj):
self.message = message
self.base = base
self.pointers = ['👈','👉','❌']
self.embeds = embeds
self.cursor = 0
self.obj = obj
async def _add_handler(self):
def reaction_check(reaction,user):
return user == self.message.author and reaction.message.id == self.base.id and reaction.emoji in self.pointers
while True:
reaction, user = await discord.Client.wait_for(self.obj, event='reaction_add', check=reaction_check)
op = self.pointers.index(reaction.emoji)
if op == 1 and self.cursor < len(self.embeds) - 1:
self.cursor += 1
await self.base.edit(embed=self.embeds[self.cursor])
elif op == 0 and self.cursor > 0:
self.cursor -= 1
await self.base.edit(embed=self.embeds[self.cursor])
elif op == 2:
await self.base.delete()
break
else:
pass
async def _remove_handler(self):
def reaction_check(reaction,user):
return user == self.message.author and reaction.message.id == self.base.id and reaction.emoji in self.pointers
while True:
reaction, user = await discord.Client.wait_for(self.obj, event='reaction_remove', check=reaction_check)
op = self.pointers.index(reaction.emoji)
if op == 1 and self.cursor < len(self.embeds) - 1:
self.cursor += 1
await self.base.edit(embed=self.embeds[self.cursor])
elif op == 0 and self.cursor > 0:
self.cursor -= 1
await self.base.edit(embed=self.embeds[self.cursor])
else:
pass
async def run(self):
await self.base.edit(content=self.message.author.mention,embed=self.embeds[0])
for pointer in self.pointers:
await self.base.add_reaction(pointer)
asyncio.ensure_future(self._add_handler())
asyncio.ensure_future(self._remove_handler())
class Embedder:
def __init__(self, image):
self.image = image
def generate(self, title, fields, current, total):
def _len_check(embed, field_name, description):
i = 5
content = description
while i > 0:
if len(content) <= 1024:
embed.add_field(name=field_name, value=content, inline=False)
break
else:
content = '\n'.join(content.split('\n\n')[:i])
i -= 1
        hub_embed = discord.Embed(title=title, description="\u200B", color=15728640)
for name, content in fields.items():
_len_check(hub_embed, name, content)
hub_embed.set_footer(text="{}/{}".format(current, total),icon_url=self.image)
return hub_embed |
pyalgotrade/barfeed/dbfeed.py | cdyfng/pyalgotrade | 1,000 | 11073152 | # PyAlgoTrade
#
# Copyright 2011-2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: <NAME> <<EMAIL>>
"""
class Database(object):
def addBars(self, bars, frequency):
for instrument in bars.getInstruments():
bar = bars.getBar(instrument)
self.addBar(instrument, bar, frequency)
def addBarsFromFeed(self, feed):
for dateTime, bars in feed:
if bars:
self.addBars(bars, feed.getFrequency())
def addBar(self, instrument, bar, frequency):
raise NotImplementedError()
def getBars(self, instrument, frequency, timezone=None, fromDateTime=None, toDateTime=None):
raise NotImplementedError()
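# A minimal in-memory subclass is sketched below for illustration only (it is not part of
# PyAlgoTrade); it simply shows the methods a concrete Database implementation must provide.
#
# class MemoryDatabase(Database):
#     def __init__(self):
#         self.__bars = {}  # maps (instrument, frequency) -> list of bars
#
#     def addBar(self, instrument, bar, frequency):
#         self.__bars.setdefault((instrument, frequency), []).append(bar)
#
#     def getBars(self, instrument, frequency, timezone=None, fromDateTime=None, toDateTime=None):
#         return self.__bars.get((instrument, frequency), [])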
|
tests/routing/test_Routables.py | paradiseng/jasmin | 750 | 11073177 | from datetime import datetime
from twisted.trial.unittest import TestCase
from jasmin.routing.Routables import (SimpleRoutablePDU, RoutableSubmitSm,
RoutableDeliverSm, InvalidRoutableParameterError,
InvalidTagError, TagNotFoundError,
InvalidLockError)
from jasmin.routing.jasminApi import *
from smpp.pdu.operations import SubmitSM
class RoutablePDUTestCase(TestCase):
def setUp(self):
self.PDU = SubmitSM(
source_addr='20203060',
destination_addr='20203060',
short_message='hello world',
)
self.connector = Connector('abc')
self.user = User(1, Group(100), 'username', 'password')
class SimpleRoutablePDUTestCase(RoutablePDUTestCase):
def test_standard(self):
o = SimpleRoutablePDU(self.connector, self.PDU, self.user, datetime.now())
self.assertEqual(o.pdu, self.PDU)
self.assertEqual(o.connector.cid, self.connector.cid)
self.assertEqual(o.user.uid, self.user.uid)
self.assertEqual(o.user.group.gid, self.user.group.gid)
self.assertNotEqual(o.datetime, None)
def test_without_datetime(self):
o = SimpleRoutablePDU(self.connector, self.PDU, self.user)
self.assertNotEqual(o.datetime, None)
def test_invalid_parameter(self):
self.assertRaises(InvalidRoutableParameterError, SimpleRoutablePDU, self.connector, object, self.user)
self.assertRaises(InvalidRoutableParameterError, SimpleRoutablePDU, object, self.PDU, self.user)
self.assertRaises(InvalidRoutableParameterError, SimpleRoutablePDU, self.connector, self.PDU, object)
def test_tagging(self):
o = SimpleRoutablePDU(self.connector, self.PDU, self.user, datetime.now())
_any_object = object()
self.assertRaises(InvalidTagError, o.addTag, _any_object)
self.assertRaises(InvalidTagError, o.hasTag, _any_object)
self.assertRaises(InvalidTagError, o.removeTag, _any_object)
# Integer tags
o.addTag(23)
self.assertTrue(o.hasTag(23))
self.assertFalse(o.hasTag(30))
self.assertRaises(TagNotFoundError, o.removeTag, 30)
self.assertEqual(['23'], o.getTags())
o.flushTags()
self.assertEqual([], o.getTags())
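        # note that the integer tag 23 is reported back by getTags() as the string '23':
        # tags are normalised to strings internally, which is why int and str forms can be mixed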
# String tags
o.addTag('23')
self.assertTrue(o.hasTag('23'))
self.assertFalse(o.hasTag('30'))
self.assertRaises(TagNotFoundError, o.removeTag, '30')
self.assertEqual(['23'], o.getTags())
o.flushTags()
self.assertEqual([], o.getTags())
# Mixed tags
o.addTag('23')
o.addTag(24)
self.assertEqual(['23', '24'], o.getTags())
o.flushTags()
class RoutableSubmitSmTestCase(RoutablePDUTestCase):
def test_standard(self):
o = RoutableSubmitSm(self.PDU, self.user, datetime.now())
self.assertEqual(o.pdu, self.PDU)
self.assertEqual(o.user.uid, self.user.uid)
self.assertEqual(o.user.group.gid, self.user.group.gid)
self.assertNotEqual(o.datetime, None)
def test_without_datetime(self):
o = RoutableSubmitSm(self.PDU, self.user)
self.assertNotEqual(o.datetime, None)
def test_invalid_parameter(self):
self.assertRaises(InvalidRoutableParameterError, RoutableSubmitSm, object, self.user)
self.assertRaises(InvalidRoutableParameterError, RoutableSubmitSm, self.PDU, object)
def test_tagging(self):
o = RoutableSubmitSm(self.PDU, self.user, datetime.now())
_any_object = object()
self.assertRaises(InvalidTagError, o.addTag, _any_object)
self.assertRaises(InvalidTagError, o.hasTag, _any_object)
self.assertRaises(InvalidTagError, o.removeTag, _any_object)
# Integer tags
o.addTag(23)
self.assertTrue(o.hasTag(23))
self.assertFalse(o.hasTag(30))
self.assertRaises(TagNotFoundError, o.removeTag, 30)
self.assertEqual(['23'], o.getTags())
o.flushTags()
self.assertEqual([], o.getTags())
# String tags
o.addTag('23')
self.assertTrue(o.hasTag('23'))
self.assertFalse(o.hasTag('30'))
self.assertRaises(TagNotFoundError, o.removeTag, '30')
self.assertEqual(['23'], o.getTags())
o.flushTags()
self.assertEqual([], o.getTags())
# Mixed tags
o.addTag('23')
o.addTag(24)
self.assertEqual(['23', '24'], o.getTags())
o.flushTags()
def test_locking(self):
o = RoutableSubmitSm(self.PDU, self.user, datetime.now())
self.assertRaises(InvalidLockError, o.lockPduParam, 'anything')
self.assertRaises(InvalidLockError, o.pduParamIsLocked, 'anything')
o.lockPduParam('service_type')
self.assertTrue(o.pduParamIsLocked('service_type'))
self.assertFalse(o.pduParamIsLocked('source_addr_ton'))
class RoutableDeliverSmTestCase(RoutablePDUTestCase):
def test_standard(self):
o = RoutableDeliverSm(self.PDU, self.connector, datetime.now())
self.assertEqual(o.pdu, self.PDU)
self.assertEqual(o.connector.cid, self.connector.cid)
self.assertNotEqual(o.datetime, None)
def test_without_datetime(self):
        o = RoutableDeliverSm(self.PDU, self.connector)
self.assertNotEqual(o.datetime, None)
def test_invalid_parameter(self):
self.assertRaises(InvalidRoutableParameterError, RoutableDeliverSm, object, self.connector)
self.assertRaises(InvalidRoutableParameterError, RoutableDeliverSm, self.PDU, object)
def test_tagging(self):
o = RoutableDeliverSm(self.PDU, self.connector, datetime.now())
_any_object = object()
self.assertRaises(InvalidTagError, o.addTag, _any_object)
self.assertRaises(InvalidTagError, o.hasTag, _any_object)
self.assertRaises(InvalidTagError, o.removeTag, _any_object)
# Integer tags
o.addTag(23)
self.assertTrue(o.hasTag(23))
self.assertFalse(o.hasTag(30))
self.assertRaises(TagNotFoundError, o.removeTag, 30)
self.assertEqual(['23'], o.getTags())
o.flushTags()
self.assertEqual([], o.getTags())
# String tags
o.addTag('23')
self.assertTrue(o.hasTag('23'))
self.assertFalse(o.hasTag('30'))
self.assertRaises(TagNotFoundError, o.removeTag, '30')
self.assertEqual(['23'], o.getTags())
o.flushTags()
self.assertEqual([], o.getTags())
# Mixed tags
o.addTag('23')
o.addTag(24)
self.assertEqual(['23', '24'], o.getTags())
o.flushTags()
|
Algo and DSA/LeetCode-Solutions-master/Python/number-of-ways-to-split-a-string.py | Sourav692/FAANG-Interview-Preparation | 3,269 | 11073183 | # Time: O(n)
# Space: O(1)
class Solution(object):
def numWays(self, s):
"""
:type s: str
:rtype: int
"""
MOD = 10**9+7
ones = s.count('1')
if ones % 3:
return 0
ones //= 3
if ones == 0:
return (len(s)-1)*(len(s)-2)//2 % MOD
count = left = right = 0
for c in s:
if c == '1':
count += 1
if count == ones:
left += 1
elif count == 2*ones:
right += 1
return left*right % MOD
|
Python/cointoss.py | kennethsequeira/Hello-world | 1,428 | 11073194 | from bokeh.plotting import figure, output_file, show
import random
import math
initial_investment = 100
y = []
for day in range(0, 365):
    if random.randint(0, 1) % 2 == 0:
        initial_investment += 0.5
    else:
        initial_investment -= 0.5
    y.append(initial_investment)
# output to static HTML file
output_file("cointoss.html")
# create a new plot with a title and axis labels
p = figure(title="Performance", x_axis_label='days', y_axis_label='portfolio')
if y[0] < y[364]:
color = "green"
else:
color = "red"
p.line(range(0, 365), y, legend="Total Contribution", line_width=1, line_color=color)
# show the results
show(p) |
src/borg/crypto/nonces.py | alfredo08154711/borg | 8,680 | 11073205 | <reponame>alfredo08154711/borg<gh_stars>1000+
import os
import sys
from binascii import unhexlify
from ..helpers import get_security_dir
from ..helpers import bin_to_hex
from ..platform import SaveFile
from ..remote import InvalidRPCMethod
from .low_level import bytes_to_long, long_to_bytes
MAX_REPRESENTABLE_NONCE = 2**64 - 1
NONCE_SPACE_RESERVATION = 2**28 # This in units of AES blocksize (16 bytes)
class NonceManager:
def __init__(self, repository, manifest_nonce):
self.repository = repository
self.end_of_nonce_reservation = None
self.manifest_nonce = manifest_nonce
self.nonce_file = os.path.join(get_security_dir(self.repository.id_str), 'nonce')
def get_local_free_nonce(self):
try:
with open(self.nonce_file, 'r') as fd:
return bytes_to_long(unhexlify(fd.read()))
except FileNotFoundError:
return None
def commit_local_nonce_reservation(self, next_unreserved, start_nonce):
if self.get_local_free_nonce() != start_nonce:
raise Exception("nonce space reservation with mismatched previous state")
with SaveFile(self.nonce_file, binary=False) as fd:
fd.write(bin_to_hex(long_to_bytes(next_unreserved)))
def get_repo_free_nonce(self):
try:
return self.repository.get_free_nonce()
except InvalidRPCMethod:
# old server version, suppress further calls
sys.stderr.write("Please upgrade to borg version 1.1+ on the server for safer AES-CTR nonce handling.\n")
self.get_repo_free_nonce = lambda: None
self.commit_repo_nonce_reservation = lambda next_unreserved, start_nonce: None
return None
def commit_repo_nonce_reservation(self, next_unreserved, start_nonce):
self.repository.commit_nonce_reservation(next_unreserved, start_nonce)
def ensure_reservation(self, nonce, nonce_space_needed):
"""
Call this before doing encryption, give current, yet unused, integer IV as <nonce>
and the amount of subsequent (counter-like) IVs needed as <nonce_space_needed>.
Return value is the IV (counter) integer you shall use for encryption.
Note: this method may return the <nonce> you gave, if a reservation for it exists or
can be established, so make sure you give a unused nonce.
"""
# Nonces may never repeat, even if a transaction aborts or the system crashes.
# Therefore a part of the nonce space is reserved before any nonce is used for encryption.
# As these reservations are committed to permanent storage before any nonce is used, this protects
# against nonce reuse in crashes and transaction aborts. In that case the reservation still
# persists and the whole reserved space is never reused.
#
# Local storage on the client is used to protect against an attacker that is able to rollback the
# state of the server or can do arbitrary modifications to the repository.
# Storage on the server is used for the multi client use case where a transaction on client A is
# aborted and later client B writes to the repository.
#
        # This scheme does not protect against an attacker who is able to roll back the state of the
        # server or can do arbitrary modifications to the repository in the multi client use case.
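        # Worked example: if the highest known free nonce across repo, local file and manifest is
        # 1000 and 10 blocks of nonce space are needed, reservation_end becomes
        # 1000 + 10 + NONCE_SPACE_RESERVATION; that end is committed to the repo and locally
        # before 1000 is returned as the first nonce to use.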
if self.end_of_nonce_reservation:
# we already got a reservation, if nonce_space_needed still fits everything is ok
next_nonce = nonce
assert next_nonce <= self.end_of_nonce_reservation
if next_nonce + nonce_space_needed <= self.end_of_nonce_reservation:
return next_nonce
repo_free_nonce = self.get_repo_free_nonce()
local_free_nonce = self.get_local_free_nonce()
free_nonce_space = max(x for x in (repo_free_nonce, local_free_nonce, self.manifest_nonce, self.end_of_nonce_reservation) if x is not None)
reservation_end = free_nonce_space + nonce_space_needed + NONCE_SPACE_RESERVATION
assert reservation_end < MAX_REPRESENTABLE_NONCE
self.commit_repo_nonce_reservation(reservation_end, repo_free_nonce)
self.commit_local_nonce_reservation(reservation_end, local_free_nonce)
self.end_of_nonce_reservation = reservation_end
return free_nonce_space
|
packages/markdown-link-classes/setup.py | nicoleiocana/lektor-website | 184 | 11073228 | <filename>packages/markdown-link-classes/setup.py
from setuptools import setup
setup(
name='lektor-markdown-link-classes',
version='0.1',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
py_modules=['lektor_markdown_link_classes'],
url='http://github.com/lektor/lektor',
entry_points={
'lektor.plugins': [
'markdown-link-classes = lektor_markdown_link_classes:MarkdownLinkClassesPlugin',
]
}
)
|
libs/box_utils/iou.py | rickyHong/NAS_FPN_repl | 224 | 11073270 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
def iou_calculate(boxes_1, boxes_2):
    with tf.name_scope('iou_calculate'):
xmin_1, ymin_1, xmax_1, ymax_1 = boxes_1[:, 0], boxes_1[:, 1], boxes_1[:, 2], boxes_1[:, 3]
xmin_2, ymin_2, xmax_2, ymax_2 = boxes_2[:, 0], boxes_2[:, 1], boxes_2[:, 2], boxes_2[:, 3]
max_xmin = tf.maximum(xmin_1, xmin_2)
min_xmax = tf.minimum(xmax_1, xmax_2)
max_ymin = tf.maximum(ymin_1, ymin_2)
min_ymax = tf.minimum(ymax_1, ymax_2)
overlap_h = tf.maximum(0., min_ymax - max_ymin) # avoid h < 0
overlap_w = tf.maximum(0., min_xmax - max_xmin)
overlaps = overlap_h * overlap_w
area_1 = (xmax_1 - xmin_1) * (ymax_1 - ymin_1) # [N, 1]
area_2 = (xmax_2 - xmin_2) * (ymax_2 - ymin_2) # [M, ]
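        # IoU = intersection / union, where union = area_1 + area_2 - intersection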
iou = overlaps / (area_1 + area_2 - overlaps)
return iou
def iou_calculate1(boxes_1, boxes_2):
xmin_1, ymin_1, xmax_1, ymax_1 = boxes_1[:, 0], boxes_1[:, 1], boxes_1[:, 2], boxes_1[:, 3]
xmin_2, ymin_2, xmax_2, ymax_2 = boxes_2[:, 0], boxes_2[:, 1], boxes_2[:, 2], boxes_2[:, 3]
max_xmin = np.maximum(xmin_1, xmin_2)
min_xmax = np.minimum(xmax_1, xmax_2)
max_ymin = np.maximum(ymin_1, ymin_2)
min_ymax = np.minimum(ymax_1, ymax_2)
overlap_h = np.maximum(0., min_ymax - max_ymin) # avoid h < 0
overlap_w = np.maximum(0., min_xmax - max_xmin)
overlaps = overlap_h * overlap_w
area_1 = (xmax_1 - xmin_1) * (ymax_1 - ymin_1) # [N, 1]
area_2 = (xmax_2 - xmin_2) * (ymax_2 - ymin_2) # [M, ]
iou = overlaps / (area_1 + area_2 - overlaps)
return iou
if __name__ == '__main__':
import os
os.environ["CUDA_VISIBLE_DEVICES"] = '13'
boxes1 = np.array([[50, 50, 100, 300],
[60, 60, 100, 200]], np.float32)
boxes2 = np.array([[50, 50, 100, 300],
[200, 200, 100, 200]], np.float32)
print(iou_calculate1(boxes1, boxes2))
|
river/checks/reco.py | michaelchiucw/river | 2,184 | 11073278 | <gh_stars>1000+
import random
def check_reco_routine(recommender):
users = ["Tom", "Anna"]
items = {"politics", "sports", "music", "food", "finance", "health", "camping"}
def get_reward(user, item) -> bool:
if user == "Tom":
return item in {"music", "politics"}
if user == "Anna":
return item in {"politics", "sports"}
for i in range(100):
user = random.choice(users)
item = recommender.recommend(user, k=1, items=items, strategy="best")[0]
clicked = get_reward(user, item)
recommender.learn_one({"user": user, "item": item}, clicked)
|
jesse/indicators/ift_rsi.py | slipperlobster/flipper | 3,999 | 11073287 | from typing import Union
import talib
import numpy as np
from jesse.helpers import get_candle_source, slice_candles, same_length
def ift_rsi(candles: np.ndarray, rsi_period: int = 5, wma_period: int =9, source_type: str = "close", sequential: bool = False) -> Union[
float, np.ndarray]:
"""
Modified Inverse Fisher Transform applied on RSI
:param candles: np.ndarray
:param rsi_period: int - default: 5
:param wma_period: int - default: 9
:param source_type: str - default: "close"
:param sequential: bool - default: False
:return: float | np.ndarray
"""
candles = slice_candles(candles, sequential)
source = get_candle_source(candles, source_type=source_type)
v1 = 0.1 * (talib.RSI(source, rsi_period) - 50)
v2 = talib.WMA(v1, wma_period)
res = (((2*v2) ** 2 - 1) / ((2*v2) ** 2 + 1))
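    # v1 maps RSI from [0, 100] to roughly [-5, 5], v2 smooths it with a WMA, and res applies the
    # modified inverse Fisher transform used here, ((2*v2)^2 - 1) / ((2*v2)^2 + 1), in place of the
    # classic (e^(2x) - 1) / (e^(2x) + 1); the squared form keeps the result within [-1, 1)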
return same_length(candles, res) if sequential else res[-1]
|
examples/plot_model_selection1.py | time-series-tools/seglearn | 509 | 11073299 | """
==========================
Hyperparameter Selection 1
==========================
This example demonstrates how to do model selection in a feature representation pipeline using a grid search
"""
# Author: <NAME>
# License: BSD
import matplotlib.pyplot as plt
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV, GroupKFold
from sklearn.preprocessing import StandardScaler
import seglearn as sgl
def plot_grid_search(cv_results, grid_param_1, grid_param_2, name_param_1, name_param_2):
# plotting grid results from David Alvarez on Stack Overflow
# Get Test Scores Mean and std for each grid search
scores_mean = cv_results['mean_test_score']
scores_mean = np.array(scores_mean).reshape(len(grid_param_2), len(grid_param_1))
scores_sd = cv_results['std_test_score']
scores_sd = np.array(scores_sd).reshape(len(grid_param_2), len(grid_param_1))
# Plot Grid search scores
_, ax = plt.subplots(1, 1)
# Param1 is the X-axis, Param 2 is represented as a different curve (color line)
for idx, val in enumerate(grid_param_2):
ax.plot(grid_param_1, scores_mean[idx, :], '-o', label=name_param_2 + ': ' + str(val))
ax.set_title("Grid Search Scores", fontsize=20, fontweight='bold')
ax.set_xlabel(name_param_1, fontsize=16)
ax.set_ylabel('CV Average Score', fontsize=16)
ax.legend(loc="best", fontsize=15)
ax.grid(True)
# load the data
data = sgl.load_watch()
X = data['X']
y = data['y']
g = data['subject']
# use subject id to group folds
splitter = GroupKFold(n_splits=3)
cv = splitter.split(X, y, groups=g)
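# GroupKFold keyed on subject keeps every window from a given subject in a single fold, so the
# classifier is always evaluated on subjects it never saw during training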
# create a feature representation pipeline
pipe = sgl.Pype([('seg', sgl.Segment()),
('features', sgl.FeatureRep()),
('scaler', StandardScaler()),
('rf', RandomForestClassifier())])
# create a parameter dictionary using the sklearn API
# note that if you want to set a parameter to a single value, it will still need to be as a list
par_grid = {'seg__width': [50, 100, 200],
'seg__overlap': [0., 0.5],
'rf__n_estimators': [20]}
clf = GridSearchCV(pipe, par_grid, cv=cv)
clf.fit(X, y)
plot_grid_search(clf.cv_results_, par_grid['seg__width'],
par_grid['seg__overlap'], 'width', 'overlap')
plt.show()
|
tests/samplers_tests/tpe_tests/test_multi_objective_sampler.py | knshnb/optuna | 1,300 | 11073306 | <reponame>knshnb/optuna<gh_stars>1000+
import itertools
import random
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
from unittest.mock import patch
from unittest.mock import PropertyMock
import numpy as np
import pytest
import optuna
from optuna.samplers import _tpe
from optuna.samplers import TPESampler
class MockSystemAttr:
def __init__(self) -> None:
self.value = {} # type: Dict[str, dict]
def set_trial_system_attr(self, _: int, key: str, value: dict) -> None:
self.value[key] = value
def test_multi_objective_sample_independent_seed_fix() -> None:
study = optuna.create_study(directions=["minimize", "maximize"])
dist = optuna.distributions.FloatDistribution(1.0, 100.0)
random.seed(128)
past_trials = [frozen_trial_factory(i, [random.random(), random.random()]) for i in range(16)]
# Prepare a trial and a sample for later checks.
trial = frozen_trial_factory(16, [0, 0])
sampler = TPESampler(seed=0)
attrs = MockSystemAttr()
with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
), patch.object(study._storage, "get_trial", return_value=trial), patch(
"optuna.trial.Trial.system_attrs", new_callable=PropertyMock
) as mock1, patch(
"optuna.trial.FrozenTrial.system_attrs",
new_callable=PropertyMock,
) as mock2:
mock1.return_value = attrs.value
mock2.return_value = attrs.value
suggestion = sampler.sample_independent(study, trial, "param-a", dist)
sampler = TPESampler(seed=0)
attrs = MockSystemAttr()
with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
), patch.object(study._storage, "get_trial", return_value=trial), patch(
"optuna.trial.Trial.system_attrs", new_callable=PropertyMock
) as mock1, patch(
"optuna.trial.FrozenTrial.system_attrs",
new_callable=PropertyMock,
) as mock2:
mock1.return_value = attrs.value
mock2.return_value = attrs.value
assert sampler.sample_independent(study, trial, "param-a", dist) == suggestion
sampler = TPESampler(seed=1)
attrs = MockSystemAttr()
with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
), patch.object(study._storage, "get_trial", return_value=trial), patch(
"optuna.trial.Trial.system_attrs", new_callable=PropertyMock
) as mock1, patch(
"optuna.trial.FrozenTrial.system_attrs",
new_callable=PropertyMock,
) as mock2:
mock1.return_value = attrs.value
mock2.return_value = attrs.value
assert sampler.sample_independent(study, trial, "param-a", dist) != suggestion
def test_multi_objective_sample_independent_prior() -> None:
study = optuna.create_study(directions=["minimize", "maximize"])
dist = optuna.distributions.FloatDistribution(1.0, 100.0)
random.seed(128)
past_trials = [frozen_trial_factory(i, [random.random(), random.random()]) for i in range(16)]
# Prepare a trial and a sample for later checks.
trial = frozen_trial_factory(16, [0, 0])
sampler = TPESampler(seed=0)
attrs = MockSystemAttr()
with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
), patch.object(study._storage, "get_trial", return_value=trial), patch(
"optuna.trial.Trial.system_attrs", new_callable=PropertyMock
) as mock1, patch(
"optuna.trial.FrozenTrial.system_attrs",
new_callable=PropertyMock,
) as mock2:
mock1.return_value = attrs.value
mock2.return_value = attrs.value
suggestion = sampler.sample_independent(study, trial, "param-a", dist)
sampler = TPESampler(consider_prior=False, seed=0)
attrs = MockSystemAttr()
with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
), patch.object(study._storage, "get_trial", return_value=trial), patch(
"optuna.trial.Trial.system_attrs", new_callable=PropertyMock
) as mock1, patch(
"optuna.trial.FrozenTrial.system_attrs",
new_callable=PropertyMock,
) as mock2:
mock1.return_value = attrs.value
mock2.return_value = attrs.value
assert sampler.sample_independent(study, trial, "param-a", dist) != suggestion
sampler = TPESampler(prior_weight=0.5, seed=0)
attrs = MockSystemAttr()
with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
), patch.object(study._storage, "get_trial", return_value=trial), patch(
"optuna.trial.Trial.system_attrs", new_callable=PropertyMock
) as mock1, patch(
"optuna.trial.FrozenTrial.system_attrs",
new_callable=PropertyMock,
) as mock2:
mock1.return_value = attrs.value
mock2.return_value = attrs.value
assert sampler.sample_independent(study, trial, "param-a", dist) != suggestion
def test_multi_objective_sample_independent_n_startup_trial() -> None:
study = optuna.create_study(directions=["minimize", "maximize"])
dist = optuna.distributions.FloatDistribution(1.0, 100.0)
random.seed(128)
past_trials = [frozen_trial_factory(i, [random.random(), random.random()]) for i in range(16)]
trial = frozen_trial_factory(16, [0, 0])
sampler = TPESampler(n_startup_trials=16, seed=0)
attrs = MockSystemAttr()
with patch.object(
study._storage, "get_all_trials", return_value=past_trials[:15]
), patch.object(
study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
), patch.object(
study._storage, "get_trial", return_value=trial
), patch(
"optuna.trial.Trial.system_attrs", new_callable=PropertyMock
) as mock1, patch(
"optuna.trial.FrozenTrial.system_attrs",
new_callable=PropertyMock,
) as mock2, patch.object(
optuna.samplers.RandomSampler,
"sample_independent",
return_value=1.0,
) as sample_method:
mock1.return_value = attrs.value
mock2.return_value = attrs.value
sampler.sample_independent(study, trial, "param-a", dist)
assert sample_method.call_count == 1
sampler = TPESampler(n_startup_trials=16, seed=0)
attrs = MockSystemAttr()
with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
), patch.object(study._storage, "get_trial", return_value=trial), patch(
"optuna.trial.Trial.system_attrs", new_callable=PropertyMock
) as mock1, patch(
"optuna.trial.FrozenTrial.system_attrs",
new_callable=PropertyMock,
) as mock2, patch.object(
optuna.samplers.RandomSampler,
"sample_independent",
return_value=1.0,
) as sample_method:
mock1.return_value = attrs.value
mock2.return_value = attrs.value
sampler.sample_independent(study, trial, "param-a", dist)
assert sample_method.call_count == 0
def test_multi_objective_sample_independent_misc_arguments() -> None:
study = optuna.create_study(directions=["minimize", "maximize"])
dist = optuna.distributions.FloatDistribution(1.0, 100.0)
random.seed(128)
past_trials = [frozen_trial_factory(i, [random.random(), random.random()]) for i in range(32)]
# Prepare a trial and a sample for later checks.
trial = frozen_trial_factory(16, [0, 0])
sampler = TPESampler(seed=0)
attrs = MockSystemAttr()
with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
), patch.object(study._storage, "get_trial", return_value=trial), patch(
"optuna.trial.Trial.system_attrs", new_callable=PropertyMock
) as mock1, patch(
"optuna.trial.FrozenTrial.system_attrs",
new_callable=PropertyMock,
) as mock2:
mock1.return_value = attrs.value
mock2.return_value = attrs.value
suggestion = sampler.sample_independent(study, trial, "param-a", dist)
# Test misc. parameters.
sampler = TPESampler(n_ei_candidates=13, seed=0)
attrs = MockSystemAttr()
with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
), patch.object(study._storage, "get_trial", return_value=trial), patch(
"optuna.trial.Trial.system_attrs", new_callable=PropertyMock
) as mock1, patch(
"optuna.trial.FrozenTrial.system_attrs",
new_callable=PropertyMock,
) as mock2:
mock1.return_value = attrs.value
mock2.return_value = attrs.value
assert sampler.sample_independent(study, trial, "param-a", dist) != suggestion
sampler = TPESampler(gamma=lambda _: 1, seed=0)
attrs = MockSystemAttr()
with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
), patch.object(study._storage, "get_trial", return_value=trial), patch(
"optuna.trial.Trial.system_attrs", new_callable=PropertyMock
) as mock1, patch(
"optuna.trial.FrozenTrial.system_attrs",
new_callable=PropertyMock,
) as mock2:
mock1.return_value = attrs.value
mock2.return_value = attrs.value
assert sampler.sample_independent(study, trial, "param-a", dist) != suggestion
sampler = TPESampler(weights=lambda n: np.zeros(n), seed=0)
attrs = MockSystemAttr()
with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
), patch.object(study._storage, "get_trial", return_value=trial), patch(
"optuna.trial.Trial.system_attrs", new_callable=PropertyMock
) as mock1, patch(
"optuna.trial.FrozenTrial.system_attrs",
new_callable=PropertyMock,
) as mock2:
mock1.return_value = attrs.value
mock2.return_value = attrs.value
assert sampler.sample_independent(study, trial, "param-a", dist) != suggestion
@pytest.mark.parametrize(
"log, step",
[
(False, None),
(True, None),
(False, 0.1),
],
)
def test_multi_objective_sample_independent_float_distributions(
log: bool, step: Optional[float]
) -> None:
# Prepare sample from float distribution for checking other distributions.
study = optuna.create_study(directions=["minimize", "maximize"])
random.seed(128)
float_dist = optuna.distributions.FloatDistribution(1.0, 100.0, log=log, step=step)
if float_dist.step:
value_fn: Optional[Callable[[int], float]] = (
lambda number: int(random.random() * 1000) * 0.1
)
else:
value_fn = None
past_trials = [
frozen_trial_factory(
i, [random.random(), random.random()], dist=float_dist, value_fn=value_fn
)
for i in range(16)
]
trial = frozen_trial_factory(16, [0, 0])
sampler = TPESampler(seed=0)
attrs = MockSystemAttr()
with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
), patch.object(study._storage, "get_trial", return_value=trial), patch(
"optuna.trial.Trial.system_attrs", new_callable=PropertyMock
) as mock1, patch(
"optuna.trial.FrozenTrial.system_attrs",
new_callable=PropertyMock,
) as mock2:
mock1.return_value = attrs.value
mock2.return_value = attrs.value
float_suggestion = sampler.sample_independent(study, trial, "param-a", float_dist)
assert 1.0 <= float_suggestion < 100.0
if float_dist.step == 0.1:
assert abs(int(float_suggestion * 10) - float_suggestion * 10) < 1e-3
# Test sample is different when `float_dist.log` is True or float_dist.step != 1.0.
random.seed(128)
dist = optuna.distributions.FloatDistribution(1.0, 100.0)
past_trials = [frozen_trial_factory(i, [random.random(), random.random()]) for i in range(16)]
trial = frozen_trial_factory(16, [0, 0])
sampler = TPESampler(seed=0)
attrs = MockSystemAttr()
with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
), patch.object(study._storage, "get_trial", return_value=trial), patch(
"optuna.trial.Trial.system_attrs", new_callable=PropertyMock
) as mock1, patch(
"optuna.trial.FrozenTrial.system_attrs",
new_callable=PropertyMock,
) as mock2:
mock1.return_value = attrs.value
mock2.return_value = attrs.value
suggestion = sampler.sample_independent(study, trial, "param-a", dist)
if float_dist.log or float_dist.step == 0.1:
assert float_suggestion != suggestion
else:
assert float_suggestion == suggestion
def test_multi_objective_sample_independent_categorical_distributions() -> None:
"""Test samples are drawn from the specified category."""
study = optuna.create_study(directions=["minimize", "maximize"])
random.seed(128)
categories = [i * 0.3 + 1.0 for i in range(330)]
def cat_value_fn(idx: int) -> float:
return categories[random.randint(0, len(categories) - 1)]
cat_dist = optuna.distributions.CategoricalDistribution(categories)
past_trials = [
frozen_trial_factory(
i, [random.random(), random.random()], dist=cat_dist, value_fn=cat_value_fn
)
for i in range(16)
]
trial = frozen_trial_factory(16, [0, 0])
sampler = TPESampler(seed=0)
attrs = MockSystemAttr()
with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
), patch.object(study._storage, "get_trial", return_value=trial), patch(
"optuna.trial.Trial.system_attrs", new_callable=PropertyMock
) as mock1, patch(
"optuna.trial.FrozenTrial.system_attrs",
new_callable=PropertyMock,
) as mock2:
mock1.return_value = attrs.value
mock2.return_value = attrs.value
categorical_suggestion = sampler.sample_independent(study, trial, "param-a", cat_dist)
assert categorical_suggestion in categories
@pytest.mark.parametrize(
"log, step",
[
(False, 1),
(True, 1),
(False, 2),
],
)
def test_multi_objective_sample_int_distributions(log: bool, step: int) -> None:
"""Test sampling from int distribution returns integer."""
study = optuna.create_study(directions=["minimize", "maximize"])
random.seed(128)
def int_value_fn(idx: int) -> float:
return random.randint(1, 100)
int_dist = optuna.distributions.IntDistribution(1, 100, log, step)
past_trials = [
frozen_trial_factory(
i, [random.random(), random.random()], dist=int_dist, value_fn=int_value_fn
)
for i in range(16)
]
trial = frozen_trial_factory(16, [0, 0])
sampler = TPESampler(seed=0)
attrs = MockSystemAttr()
with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
), patch.object(study._storage, "get_trial", return_value=trial), patch(
"optuna.trial.Trial.system_attrs", new_callable=PropertyMock
) as mock1, patch(
"optuna.trial.FrozenTrial.system_attrs",
new_callable=PropertyMock,
) as mock2:
mock1.return_value = attrs.value
mock2.return_value = attrs.value
int_suggestion = sampler.sample_independent(study, trial, "param-a", int_dist)
assert 1 <= int_suggestion <= 100
assert isinstance(int_suggestion, int)
@pytest.mark.parametrize(
"state",
[
(optuna.trial.TrialState.FAIL,),
(optuna.trial.TrialState.PRUNED,),
(optuna.trial.TrialState.RUNNING,),
(optuna.trial.TrialState.WAITING,),
],
)
def test_multi_objective_sample_independent_handle_unsuccessful_states(
state: optuna.trial.TrialState,
) -> None:
study = optuna.create_study(directions=["minimize", "maximize"])
dist = optuna.distributions.FloatDistribution(1.0, 100.0)
random.seed(128)
# Prepare sampling result for later tests.
past_trials = [frozen_trial_factory(i, [random.random(), random.random()]) for i in range(32)]
trial = frozen_trial_factory(32, [0, 0])
sampler = TPESampler(seed=0)
attrs = MockSystemAttr()
with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
), patch.object(study._storage, "get_trial", return_value=trial), patch(
"optuna.trial.Trial.system_attrs", new_callable=PropertyMock
) as mock1, patch(
"optuna.trial.FrozenTrial.system_attrs",
new_callable=PropertyMock,
) as mock2:
mock1.return_value = attrs.value
mock2.return_value = attrs.value
all_success_suggestion = sampler.sample_independent(study, trial, "param-a", dist)
# Test unsuccessful trials are handled differently.
state_fn = build_state_fn(state)
past_trials = [
frozen_trial_factory(i, [random.random(), random.random()], state_fn=state_fn)
for i in range(32)
]
trial = frozen_trial_factory(32, [0, 0])
sampler = TPESampler(seed=0)
attrs = MockSystemAttr()
with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
), patch.object(study._storage, "get_trial", return_value=trial), patch(
"optuna.trial.Trial.system_attrs", new_callable=PropertyMock
) as mock1, patch(
"optuna.trial.FrozenTrial.system_attrs",
new_callable=PropertyMock,
) as mock2:
mock1.return_value = attrs.value
mock2.return_value = attrs.value
partial_unsuccessful_suggestion = sampler.sample_independent(study, trial, "param-a", dist)
assert partial_unsuccessful_suggestion != all_success_suggestion
def test_multi_objective_sample_independent_ignored_states() -> None:
"""Tests FAIL, RUNNING, and WAITING states are equally."""
study = optuna.create_study(directions=["minimize", "maximize"])
dist = optuna.distributions.FloatDistribution(1.0, 100.0)
suggestions = []
for state in [
optuna.trial.TrialState.FAIL,
optuna.trial.TrialState.RUNNING,
optuna.trial.TrialState.WAITING,
]:
random.seed(128)
state_fn = build_state_fn(state)
past_trials = [
frozen_trial_factory(i, [random.random(), random.random()], state_fn=state_fn)
for i in range(32)
]
trial = frozen_trial_factory(32, [0, 0])
sampler = TPESampler(seed=0)
attrs = MockSystemAttr()
with patch.object(study._storage, "get_all_trials", return_value=past_trials), patch.object(
study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
), patch.object(study._storage, "get_trial", return_value=trial), patch(
"optuna.trial.Trial.system_attrs", new_callable=PropertyMock
) as mock1, patch(
"optuna.trial.FrozenTrial.system_attrs",
new_callable=PropertyMock,
) as mock2:
mock1.return_value = attrs.value
mock2.return_value = attrs.value
suggestions.append(sampler.sample_independent(study, trial, "param-a", dist))
assert len(set(suggestions)) == 1
def test_multi_objective_get_observation_pairs() -> None:
def objective(trial: optuna.trial.Trial) -> Tuple[float, float]:
trial.suggest_int("x", 5, 5)
return 5.0, 5.0
sampler = TPESampler(seed=0)
study = optuna.create_study(directions=["minimize", "maximize"], sampler=sampler)
study.optimize(objective, n_trials=5)
assert _tpe.sampler._get_observation_pairs(study, ["x"], False) == (
{"x": [5.0, 5.0, 5.0, 5.0, 5.0]},
[(-float("inf"), [5.0, -5.0]) for _ in range(5)],
)
assert _tpe.sampler._get_observation_pairs(study, ["y"], False) == (
{"y": [None, None, None, None, None]},
[(-float("inf"), [5.0, -5.0]) for _ in range(5)],
)
assert _tpe.sampler._get_observation_pairs(study, ["x"], True) == (
{"x": [5.0, 5.0, 5.0, 5.0, 5.0]},
[(-float("inf"), [5.0, -5.0]) for _ in range(5)],
)
assert _tpe.sampler._get_observation_pairs(study, ["y"], True) == ({"y": []}, [])
def test_calculate_nondomination_rank() -> None:
# Single objective
test_case = np.asarray([[10], [20], [20], [30]])
ranks = list(_tpe.sampler._calculate_nondomination_rank(test_case))
assert ranks == [0, 1, 1, 2]
# Two objectives
test_case = np.asarray([[10, 30], [10, 10], [20, 20], [30, 10], [15, 15]])
ranks = list(_tpe.sampler._calculate_nondomination_rank(test_case))
assert ranks == [1, 0, 2, 1, 1]
# Three objectives
test_case = np.asarray([[5, 5, 4], [5, 5, 5], [9, 9, 0], [5, 7, 5], [0, 0, 9], [0, 9, 9]])
ranks = list(_tpe.sampler._calculate_nondomination_rank(test_case))
assert ranks == [0, 1, 0, 2, 0, 1]
def test_calculate_weights_below_for_multi_objective() -> None:
# Two samples.
weights_below = _tpe.sampler._calculate_weights_below_for_multi_objective(
{"x": np.array([1.0, 2.0, 3.0], dtype=float)},
[(0, [0.2, 0.5]), (0, [0.9, 0.4]), (0, [1, 1])],
np.array([0, 1]),
)
assert len(weights_below) == 2
assert weights_below[0] > weights_below[1]
assert sum(weights_below) > 0
# Two equally contributed samples.
weights_below = _tpe.sampler._calculate_weights_below_for_multi_objective(
{"x": np.array([1.0, 2.0, 3.0], dtype=float)},
[(0, [0.2, 0.8]), (0, [0.8, 0.2]), (0, [1, 1])],
np.array([0, 1]),
)
assert len(weights_below) == 2
assert weights_below[0] == weights_below[1]
assert sum(weights_below) > 0
# Duplicated samples.
weights_below = _tpe.sampler._calculate_weights_below_for_multi_objective(
{"x": np.array([1.0, 2.0, 3.0], dtype=float)},
[(0, [0.2, 0.8]), (0, [0.2, 0.8]), (0, [1, 1])],
np.array([0, 1]),
)
assert len(weights_below) == 2
assert weights_below[0] == weights_below[1]
assert sum(weights_below) > 0
# Three samples.
weights_below = _tpe.sampler._calculate_weights_below_for_multi_objective(
{"x": np.array([1.0, 2.0, 3.0, 4.0], dtype=float)},
[(0, [0.3, 0.3]), (0, [0.2, 0.8]), (0, [0.8, 0.2]), (0, [1, 1])],
np.array([0, 1, 2]),
)
assert len(weights_below) == 3
assert weights_below[0] > weights_below[1]
assert weights_below[0] > weights_below[2]
assert weights_below[1] == weights_below[2]
assert sum(weights_below) > 0
def test_solve_hssp() -> None:
random.seed(128)
# Two dimensions
for i in range(8):
subset_size = int(random.random() * i) + 1
test_case = np.asarray([[random.random(), random.random()] for _ in range(8)])
r = 1.1 * np.max(test_case, axis=0)
truth = 0.0
for subset in itertools.permutations(test_case, subset_size):
truth = max(truth, _tpe.sampler._compute_hypervolume(np.asarray(subset), r))
indices = _tpe.sampler._solve_hssp(test_case, np.arange(len(test_case)), subset_size, r)
approx = _tpe.sampler._compute_hypervolume(test_case[indices], r)
assert approx / truth > 0.6321 # 1 - 1/e
# Three dimensions
for i in range(8):
subset_size = int(random.random() * i) + 1
test_case = np.asarray(
[[random.random(), random.random(), random.random()] for _ in range(8)]
)
r = 1.1 * np.max(test_case, axis=0)
truth = 0
for subset in itertools.permutations(test_case, subset_size):
truth = max(truth, _tpe.sampler._compute_hypervolume(np.asarray(subset), r))
indices = _tpe.sampler._solve_hssp(test_case, np.arange(len(test_case)), subset_size, r)
approx = _tpe.sampler._compute_hypervolume(test_case[indices], r)
assert approx / truth > 0.6321 # 1 - 1/e
def frozen_trial_factory(
number: int,
values: List[float],
dist: optuna.distributions.BaseDistribution = optuna.distributions.FloatDistribution(
1.0, 100.0
),
value_fn: Optional[Callable[[int], Union[int, float]]] = None,
state_fn: Callable[
[int], optuna.trial.TrialState
] = lambda _: optuna.trial.TrialState.COMPLETE,
) -> optuna.trial.FrozenTrial:
if value_fn is None:
value = random.random() * 99.0 + 1.0
else:
value = value_fn(number)
trial = optuna.trial.FrozenTrial(
number=number,
trial_id=number,
        state=state_fn(number),
value=None,
datetime_start=None,
datetime_complete=None,
params={"param-a": value},
distributions={"param-a": dist},
user_attrs={},
system_attrs={},
intermediate_values={},
values=values,
)
return trial
def build_state_fn(state: optuna.trial.TrialState) -> Callable[[int], optuna.trial.TrialState]:
def state_fn(idx: int) -> optuna.trial.TrialState:
return [optuna.trial.TrialState.COMPLETE, state][idx % 2]
return state_fn
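# For reference: frozen_trial_factory() above fabricates a single-parameter
# FrozenTrial ("param-a") for the mocked storage, and build_state_fn(state)
# returns a helper that assigns COMPLETE to even-numbered trials and the given
# state to odd-numbered ones, so roughly half of the fabricated history is
# "unsuccessful" in the state-handling tests.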
|
nglview/stage.py | tovrstra/nglview | 161 | 11073321 | from .utils.py_utils import _camelize_dict
class Stage:
"""
Try to mimic NGL.Stage
"""
def __init__(self, view):
self._view = view
def set_parameters(self, **kwargs):
self._view._remote_call('setParameters',
target='Stage',
kwargs=_camelize_dict(kwargs))
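# Usage sketch (assumes an existing NGLWidget named `view`; parameter names are
# written in snake_case on the Python side and camelized before being forwarded
# to the NGL stage, e.g. background_color -> backgroundColor):
#
#     stage = Stage(view)
#     stage.set_parameters(background_color='black', camera_type='orthographic')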
|
pyproj/crs/__init__.py | psydox/pyproj | 467 | 11073340 | """
This module interfaces with PROJ to produce a pythonic interface
to the coordinate reference system (CRS) information through the CRS
class.
"""
from pyproj._crs import ( # noqa: F401 pylint: disable=unused-import
CoordinateOperation,
CoordinateSystem,
Datum,
Ellipsoid,
PrimeMeridian,
is_proj,
is_wkt,
)
from pyproj.crs.crs import ( # noqa: F401 pylint: disable=unused-import
CRS,
BoundCRS,
CompoundCRS,
CustomConstructorCRS,
DerivedGeographicCRS,
GeocentricCRS,
GeographicCRS,
ProjectedCRS,
VerticalCRS,
)
from pyproj.exceptions import CRSError # noqa: F401 pylint: disable=unused-import
|
rotypes/Windows/Graphics/Imaging/__init__.py | Sait0Yuuki/ArknightsAutoHelper | 1,035 | 11073349 | <reponame>Sait0Yuuki/ArknightsAutoHelper
from ctypes import c_int, c_int32
from rotypes.Windows.Foundation import IClosable
from rotypes.inspectable import IInspectable
from rotypes.idldsl import define_winrt_com_method, _static_method, runtimeclass
import rotypes.Windows.Storage.Streams
class ISoftwareBitmap(IClosable, IInspectable):
IID = '689e0708-7eef-483f-963f-da938818e073'
class BitmapPixelFormat(c_int):
Rgba8 = 30
class BitmapAlphaMode(c_int):
Straight = 1
class ISoftwareBitmapStatics(IInspectable):
IID = 'DF0385DB-672F-4A9D-806E-C2442F343E86'
class SoftwareBitmap(runtimeclass, ISoftwareBitmap):
CreateCopyWithAlphaFromBuffer = _static_method(ISoftwareBitmapStatics, 'CreateCopyWithAlphaFromBuffer')
define_winrt_com_method(ISoftwareBitmapStatics, "CreateCopyWithAlphaFromBuffer",
rotypes.Windows.Storage.Streams.IBuffer, BitmapPixelFormat, c_int32, c_int32, BitmapAlphaMode,
retval=SoftwareBitmap, vtbl=10)
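# Usage sketch (assumes `buffer` is an IBuffer holding width*height*4 bytes of
# RGBA pixel data obtained elsewhere via rotypes.Windows.Storage.Streams):
#
#     bitmap = SoftwareBitmap.CreateCopyWithAlphaFromBuffer(
#         buffer, BitmapPixelFormat.Rgba8, width, height, BitmapAlphaMode.Straight)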
|
plaso/cli/helpers/output_modules.py | pyllyukko/plaso | 1,253 | 11073372 | <gh_stars>1000+
# -*- coding: utf-8 -*-
"""The output modules CLI arguments helper."""
import sys
from plaso.cli import tools
from plaso.cli.helpers import interface
from plaso.cli.helpers import manager
from plaso.lib import errors
from plaso.output import manager as output_manager
class OutputModulesArgumentsHelper(interface.ArgumentsHelper):
"""Output modules CLI arguments helper."""
NAME = 'output_modules'
DESCRIPTION = 'Output modules command line arguments.'
@classmethod
def AddArguments(cls, argument_group):
"""Adds command line arguments to an argument group.
This function takes an argument parser or an argument group object and adds
to it all the command line arguments this helper supports.
Args:
argument_group (argparse._ArgumentGroup|argparse.ArgumentParser):
argparse group.
"""
argument_group.add_argument(
'-o', '--output_format', '--output-format', metavar='FORMAT',
dest='output_format', default='dynamic', help=(
'The output format. Use "-o list" to see a list of available '
'output formats.'))
argument_group.add_argument(
'-w', '--write', metavar='OUTPUT_FILE', dest='write',
help='Output filename.')
# TODO: determine if this is repeated elsewhere and refactor this into
# a helper function.
arguments = sys.argv[1:]
argument_index = 0
if '-o' in arguments:
argument_index = arguments.index('-o') + 1
elif '--output_format' in arguments:
argument_index = arguments.index('--output_format') + 1
elif '--output-format' in arguments:
argument_index = arguments.index('--output-format') + 1
if 0 < argument_index < len(arguments):
names = [name.strip() for name in arguments[argument_index].split(',')]
else:
names = ['dynamic']
if names and names != ['list']:
manager.ArgumentHelperManager.AddCommandLineArguments(
argument_group, category='output', names=names)
@classmethod
def ParseOptions(cls, options, configuration_object):
"""Parses and validates options.
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type.
BadConfigOption: when the output format is not supported or the output
is not provided or already exists.
"""
if not isinstance(configuration_object, tools.CLITool):
raise errors.BadConfigObject(
'Configuration object is not an instance of CLITool')
output_format = getattr(options, 'output_format', 'dynamic')
output_filename = getattr(options, 'write', None)
if output_format != 'list':
if not output_manager.OutputManager.HasOutputClass(output_format):
raise errors.BadConfigOption(
'Unsupported output format: {0:s}.'.format(output_format))
setattr(configuration_object, '_output_format', output_format)
setattr(configuration_object, '_output_filename', output_filename)
manager.ArgumentHelperManager.RegisterHelper(OutputModulesArgumentsHelper)
|
paasta_tools/cli/cmds/validate.py | sobolevn/paasta | 1,711 | 11073375 | <reponame>sobolevn/paasta<filename>paasta_tools/cli/cmds/validate.py<gh_stars>1000+
#!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import pkgutil
from collections import Counter
from glob import glob
import yaml
from jsonschema import Draft4Validator
from jsonschema import exceptions
from jsonschema import FormatChecker
from jsonschema import ValidationError
from paasta_tools.cli.utils import failure
from paasta_tools.cli.utils import get_file_contents
from paasta_tools.cli.utils import get_instance_config
from paasta_tools.cli.utils import guess_service_name
from paasta_tools.cli.utils import lazy_choices_completer
from paasta_tools.cli.utils import PaastaColors
from paasta_tools.cli.utils import success
from paasta_tools.kubernetes_tools import sanitise_kubernetes_name
from paasta_tools.secret_tools import get_secret_name_from_ref
from paasta_tools.secret_tools import is_secret_ref
from paasta_tools.secret_tools import is_shared_secret
from paasta_tools.tron_tools import list_tron_clusters
from paasta_tools.tron_tools import validate_complete_config
from paasta_tools.utils import get_service_instance_list
from paasta_tools.utils import list_all_instances_for_service
from paasta_tools.utils import list_clusters
from paasta_tools.utils import list_services
from paasta_tools.utils import load_system_paasta_config
SCHEMA_VALID = success("Successfully validated schema")
SCHEMA_ERROR = failure(
"Failed to load schema.",
"http://paasta.readthedocs.io/en/latest/yelpsoa_configs.html",
)
SCHEMA_INVALID = failure(
"Failed to validate schema. More info:",
"http://paasta.readthedocs.io/en/latest/yelpsoa_configs.html",
)
SCHEMA_NOT_FOUND = failure(
"Failed to find schema to validate against. More info:",
"http://paasta.readthedocs.io/en/latest/yelpsoa_configs.html",
)
FAILED_READING_FILE = failure(
"Failed to read file. More info:",
"http://paasta.readthedocs.io/en/latest/yelpsoa_configs.html",
)
UNKNOWN_SERVICE = (
"Unable to determine service to validate.\n"
"Please supply the %s name you wish to "
"validate with the %s option."
% (PaastaColors.cyan("SERVICE"), PaastaColors.cyan("-s"))
)
def invalid_tron_namespace(cluster, output, filename):
return failure(
"%s is invalid:\n %s\n " "More info:" % (filename, output),
"http://tron.readthedocs.io/en/latest/jobs.html",
)
def valid_tron_namespace(cluster, filename):
return success(f"{filename} is valid.")
def duplicate_instance_names_message(service, cluster, instance_names):
instance_name_list = "\n\t".join(instance_names)
message = (
f"Service {service} uses the following duplicate instance names for "
f"cluster {cluster}:\n\t{instance_name_list}\n"
)
return failure(
message, "https://paasta.readthedocs.io/en/latest/yelpsoa_configs.html"
)
def no_duplicate_instance_names_message(service, cluster):
return success(f"All {service}'s instance names in cluster {cluster} are unique")
def get_schema(file_type):
"""Get the correct schema to use for validation
:param file_type: what schema type should we validate against
"""
schema_path = "schemas/%s_schema.json" % file_type
try:
schema = pkgutil.get_data("paasta_tools.cli", schema_path).decode()
except IOError:
return None
return json.loads(schema)
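# For example, get_schema("kubernetes") loads the package-data file
# "schemas/kubernetes_schema.json" from paasta_tools.cli and returns the parsed
# dict, or None when no such schema file exists.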
def validate_instance_names(config_file_object, file_path):
errors = []
for instance_name in config_file_object:
if (
not instance_name.startswith("_")
and len(sanitise_kubernetes_name(instance_name)) > 63
):
errors.append(instance_name)
if errors:
error_string = "\n".join(errors)
print(
failure(
f"Length of instance name \n{error_string}\n should be no more than 63."
+ " Note _ is replaced with -- due to Kubernetes restriction",
"http://paasta.readthedocs.io/en/latest/yelpsoa_configs.html",
)
)
return len(errors) == 0
def validate_service_name(service):
if len(sanitise_kubernetes_name(service)) > 63:
print(
failure(
f"Length of service name {service} should be no more than 63."
+ " Note _ is replaced with - due to Kubernetes restriction",
"http://paasta.readthedocs.io/en/latest/yelpsoa_configs.html",
)
)
return False
return True
def get_config_file_dict(file_path):
basename = os.path.basename(file_path)
extension = os.path.splitext(basename)[1]
try:
config_file = get_file_contents(file_path)
if extension == ".yaml":
return yaml.safe_load(config_file)
elif extension == ".json":
return json.loads(config_file)
else:
return config_file
except Exception:
print(f"{FAILED_READING_FILE}: {file_path}")
raise
def validate_schema(file_path, file_type):
"""Check if the specified config file has a valid schema
:param file_path: path to file to validate
:param file_type: what schema type should we validate against
"""
try:
schema = get_schema(file_type)
except Exception as e:
print(f"{SCHEMA_ERROR}: {file_type}, error: {e!r}")
return
if schema is None:
print(f"{SCHEMA_NOT_FOUND}: {file_path}")
return
validator = Draft4Validator(schema, format_checker=FormatChecker())
basename = os.path.basename(file_path)
config_file_object = get_config_file_dict(file_path)
try:
validator.validate(config_file_object)
if file_type == "kubernetes" and not validate_instance_names(
config_file_object, file_path
):
return
except ValidationError:
print(f"{SCHEMA_INVALID}: {file_path}")
errors = validator.iter_errors(config_file_object)
print(" Validation Message: %s" % exceptions.best_match(errors).message)
except Exception as e:
print(f"{SCHEMA_ERROR}: {file_type}, error: {e!r}")
return
else:
print(f"{SCHEMA_VALID}: {basename}")
return True
def validate_all_schemas(service_path):
"""Finds all recognized config files in service directory,
and validates their schema.
:param service_path: path to location of configuration files
"""
path = os.path.join(service_path, "*.yaml")
returncode = True
for file_name in glob(path):
if os.path.islink(file_name):
continue
basename = os.path.basename(file_name)
for file_type in ["marathon", "adhoc", "tron", "kubernetes"]:
if basename.startswith(file_type):
if not validate_schema(file_name, file_type):
returncode = False
return returncode
def add_subparser(subparsers):
validate_parser = subparsers.add_parser(
"validate",
description="Execute 'paasta validate' from service repo root",
help="Validate that all paasta config files in pwd are correct",
)
validate_parser.add_argument(
"-s",
"--service",
required=False,
help="Service that you want to validate. Like 'example_service'.",
).completer = lazy_choices_completer(list_services)
validate_parser.add_argument(
"-y",
"--yelpsoa-config-root",
dest="yelpsoa_config_root",
default=os.getcwd(),
required=False,
help="Path to root of yelpsoa-configs checkout",
)
validate_parser.set_defaults(command=paasta_validate)
def check_service_path(service_path):
"""Check that the specified path exists and has yaml files
:param service_path: Path to directory that should contain yaml files
"""
if not service_path or not os.path.isdir(service_path):
print(
failure(
"%s is not a directory" % service_path,
"http://paasta.readthedocs.io/en/latest/yelpsoa_configs.html",
)
)
return False
if not glob(os.path.join(service_path, "*.yaml")):
print(
failure(
"%s does not contain any .yaml files" % service_path,
"http://paasta.readthedocs.io/en/latest/yelpsoa_configs.html",
)
)
return False
return True
def get_service_path(service, soa_dir):
"""Determine the path of the directory containing the conf files
:param service: Name of service
:param soa_dir: Directory containing soa configs for all services
"""
if service:
service_path = os.path.join(soa_dir, service)
else:
if soa_dir == os.getcwd():
service_path = os.getcwd()
else:
print(UNKNOWN_SERVICE)
return None
return service_path
def path_to_soa_dir_service(service_path):
"""Split a service_path into its soa_dir and service name components"""
soa_dir = os.path.dirname(service_path)
service = os.path.basename(service_path)
return soa_dir, service
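# For example (hypothetical path):
#     path_to_soa_dir_service("/nail/etc/services/example_service")
#     -> ("/nail/etc/services", "example_service")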
def validate_tron(service_path):
soa_dir, service = path_to_soa_dir_service(service_path)
returncode = True
for cluster in list_tron_clusters(service, soa_dir):
if not validate_tron_namespace(service, cluster, soa_dir):
returncode = False
return returncode
def validate_tron_namespace(service, cluster, soa_dir, tron_dir=False):
if tron_dir:
display_name = f"{cluster}/{service}.yaml"
else:
display_name = f"tron-{cluster}.yaml"
messages = validate_complete_config(service, cluster, soa_dir)
returncode = len(messages) == 0
if messages:
print(invalid_tron_namespace(cluster, "\n ".join(messages), display_name))
else:
print(valid_tron_namespace(cluster, display_name))
return returncode
def validate_paasta_objects(service_path):
soa_dir, service = path_to_soa_dir_service(service_path)
returncode = True
messages = []
for cluster in list_clusters(service, soa_dir):
for instance in list_all_instances_for_service(
service=service, clusters=[cluster], soa_dir=soa_dir
):
instance_config = get_instance_config(
service=service,
instance=instance,
cluster=cluster,
load_deployments=False,
soa_dir=soa_dir,
)
messages.extend(instance_config.validate())
returncode = len(messages) == 0
if messages:
errors = "\n".join(messages)
print(failure((f"There were failures validating {service}: {errors}"), ""))
else:
print(success(f"All PaaSTA Instances for are valid for all clusters"))
return returncode
def validate_unique_instance_names(service_path):
"""Check that the service does not use the same instance name more than once"""
soa_dir, service = path_to_soa_dir_service(service_path)
check_passed = True
for cluster in list_clusters(service, soa_dir):
service_instances = get_service_instance_list(
service=service, cluster=cluster, soa_dir=soa_dir
)
instance_names = [service_instance[1] for service_instance in service_instances]
instance_name_to_count = Counter(instance_names)
duplicate_instance_names = [
instance_name
for instance_name, count in instance_name_to_count.items()
if count > 1
]
if duplicate_instance_names:
check_passed = False
print(
duplicate_instance_names_message(
service, cluster, duplicate_instance_names
)
)
else:
print(no_duplicate_instance_names_message(service, cluster))
return check_passed
def validate_autoscaling_configs(service_path):
"""Validate new autoscaling configurations that are not validated by jsonschema for the service of interest.
:param service_path: Path to directory containing soa conf yaml files for service
"""
soa_dir, service = path_to_soa_dir_service(service_path)
returncode = True
for cluster in list_clusters(service, soa_dir):
for instance in list_all_instances_for_service(
service=service, clusters=[cluster], soa_dir=soa_dir
):
instance_config = get_instance_config(
service=service,
instance=instance,
cluster=cluster,
load_deployments=False,
soa_dir=soa_dir,
)
if (
instance_config.get_instance_type() == "kubernetes"
and instance_config.is_autoscaling_enabled()
):
autoscaling_params = instance_config.get_autoscaling_params()
if autoscaling_params["metrics_provider"] in {"uwsgi", "http"}:
# a service may omit both of these keys, but we provide our own
# default setpoint for all metrics providers so we are safe to
# unconditionally read it
setpoint = autoscaling_params["setpoint"]
offset = autoscaling_params.get("offset", 0)
if setpoint - offset <= 0:
returncode = False
print(
failure(
msg="Autoscaling configuration is invalid: offset must be "
f"smaller than setpoint\n\t(setpoint: {setpoint} | offset: {offset})",
link="",
)
)
return returncode
def validate_min_max_instances(service_path):
soa_dir, service = path_to_soa_dir_service(service_path)
returncode = True
for cluster in list_clusters(service, soa_dir):
for instance in list_all_instances_for_service(
service=service, clusters=[cluster], soa_dir=soa_dir
):
instance_config = get_instance_config(
service=service,
instance=instance,
cluster=cluster,
load_deployments=False,
soa_dir=soa_dir,
)
if instance_config.get_instance_type() != "tron":
min_instances = instance_config.get_min_instances()
max_instances = instance_config.get_max_instances()
if min_instances is not None and max_instances is not None:
if max_instances < min_instances:
returncode = False
print(
failure(
f"Instance {instance} on cluster {cluster} has a greater number of min_instances than max_instances."
+ f"The number of min_instances ({min_instances}) cannot be greater than the max_instances ({max_instances}).",
"",
)
)
return returncode
def check_secrets_for_instance(instance_config_dict, soa_dir, service_path, vault_env):
return_value = True
for env_value in instance_config_dict.get("env", {}).values():
if is_secret_ref(env_value):
secret_name = get_secret_name_from_ref(env_value)
if is_shared_secret(env_value):
secret_file_name = f"{soa_dir}/_shared/secrets/{secret_name}.json"
else:
secret_file_name = f"{service_path}/secrets/{secret_name}.json"
if os.path.isfile(secret_file_name):
secret_json = get_config_file_dict(secret_file_name)
if "ciphertext" not in secret_json["environments"].get(vault_env, {}):
print(
failure(
f"Secret {secret_name} not defined for ecosystem {vault_env} on secret file {secret_file_name}",
"",
)
)
return_value = False
else:
print(failure(f"Secret file {secret_file_name} not defined", ""))
return_value = False
return return_value
def validate_secrets(service_path):
soa_dir, service = path_to_soa_dir_service(service_path)
system_paasta_config = load_system_paasta_config()
vault_cluster_map = system_paasta_config.get_vault_cluster_config()
return_value = True
for cluster in list_clusters(service, soa_dir):
vault_env = vault_cluster_map.get(cluster)
if not vault_env:
print(failure(f"{cluster} not found on vault_cluster_map", ""))
return_value = False
continue
for instance in list_all_instances_for_service(
service=service, clusters=[cluster], soa_dir=soa_dir
):
instance_config = get_instance_config(
service=service,
instance=instance,
cluster=cluster,
load_deployments=False,
soa_dir=soa_dir,
)
if not check_secrets_for_instance(
instance_config.config_dict, soa_dir, service_path, vault_env
):
return_value = False
if return_value:
print(success("No orphan secrets found"))
return return_value
def paasta_validate_soa_configs(service, service_path):
"""Analyze the service in service_path to determine if the conf files are valid
:param service_path: Path to directory containing soa conf yaml files for service
"""
if not check_service_path(service_path):
return False
if not validate_service_name(service):
return False
returncode = True
if not validate_all_schemas(service_path):
returncode = False
if not validate_tron(service_path):
returncode = False
if not validate_paasta_objects(service_path):
returncode = False
if not validate_unique_instance_names(service_path):
returncode = False
if not validate_autoscaling_configs(service_path):
returncode = False
if not validate_secrets(service_path):
returncode = False
if not validate_min_max_instances(service_path):
returncode = False
return returncode
def paasta_validate(args):
"""Generate a service_path from the provided args and call paasta_validate_soa_configs
:param args: argparse.Namespace obj created from sys.args by cli
"""
service_path = get_service_path(args.service, args.yelpsoa_config_root)
service = args.service or guess_service_name()
if not paasta_validate_soa_configs(service, service_path):
return 1
|
ci/scripts/python/nrf5_cmake/library_operations.py | perfectco/cmake-nRF5x | 111 | 11073390 | from __future__ import annotations
import json
from unittest import TestCase
from nrf5_cmake.library import Library, LibraryProperty, Property
from nrf5_cmake.library_description import LibraryDescription, LibraryVariant, LibraryPatch, LibraryOperation, LibraryVersion
from nrf5_cmake.version import Version
from typing import Any, Dict, List, Set, Tuple
def libraries_load_from_file(filepath: str) -> Dict[str, LibraryDescription]:
libs: Dict[str, LibraryDescription] = {}
with open(filepath, 'r') as file:
json_libs: Dict[str, Any] = json.load(file)
if not isinstance(json_libs, dict):
raise Exception("Exampected a dictionary of libraries")
for json_lib_name in json_libs:
json_lib = json_libs[json_lib_name]
libs[json_lib_name] = LibraryDescription.from_json(json_lib)
return libs
def libraries_save_to_file(filepath: str, libraries: Dict[str, LibraryDescription]):
json_libs: Dict[str, Any] = {}
with open(filepath, 'w') as file:
for lib_name, lib in libraries.items():
json_libs[lib_name] = lib.to_json()
json.dump(json_libs, file, indent=2)
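# Round-trip sketch (hypothetical file names): the JSON index maps library names
# to LibraryDescription entries, so loading and saving are symmetrical:
#
#     libs = libraries_load_from_file("nrf5_libraries.json")
#     libraries_save_to_file("nrf5_libraries_copy.json", libs)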
def libraries_dependencies_per_sdk(selected_libraries: Dict[str, LibraryDescription],
all_libraries: Dict[str, LibraryDescription],
supported_sdks: List[Version]) -> Dict[Version, Set[str]]:
# Collect a list of dependencies for each SDK version.
sdk_dependencies: Dict[Version, Set[str]] = {}
for sdk_version in supported_sdks:
# Get list of all dependencies including libraries
dependencies: Set[str] = set()
for (library_name, library_desc) in selected_libraries.items():
library_for_sdk = library_desc.library_for_sdk_version(sdk_version)
if library_for_sdk == None:
continue
dependencies.add(library_name)
dependencies.update(library_for_sdk.get_prop(
LibraryProperty.DEPENDENCIES
).get_all_items())
# Iterate over all existing dependencies and collect new ones.
# If expanded list of dependencies is bigger than original ones
# continue.
while True:
new_dependencies = dependencies.copy()
for dependency in dependencies:
                # Check if dependency exists...
if not dependency in all_libraries:
print(f"WARNING: dependency {dependency} doesn't exist")
continue
library_dep_desc = all_libraries[dependency]
# Check if dependency exists for this SDK version.
library_dep = library_dep_desc.library_for_sdk_version(
sdk_version)
if library_dep == None:
print(
f"WARNING: dependency {dependency} should exist for SDK {sdk_version}")
continue
# Get all dependencies and apply them.
library_dep_dep_list = library_dep.get_prop(
LibraryProperty.DEPENDENCIES
).get_all_items()
new_dependencies.update(library_dep_dep_list)
# Check if two sets are the same
if new_dependencies == dependencies:
break
# Use new extended list of dependencies.
dependencies = new_dependencies
# Add generated dependencies to version
sdk_dependencies[sdk_version] = dependencies
return sdk_dependencies
class LibrariesDependenciesPerSdkTestCase(TestCase):
def test_dependencies(self):
sdk_14 = Version.from_string("14.0.0")
sdk_15 = Version.from_string("15.0.0")
sdk_16 = Version.from_string("16.0.0")
library_a = LibraryDescription(
variant=LibraryVariant.OBJECT,
library=Library(
dependencies=Property(public={"b"})
),
sdk_version=LibraryVersion(sdk_15),
patches=[
LibraryPatch(
operation=LibraryOperation.ADD,
sdk_version=LibraryVersion(sdk_15),
library=Library(dependencies=Property(public={"c"}))
),
LibraryPatch(
operation=LibraryOperation.REMOVE,
sdk_version=LibraryVersion(sdk_16),
library=Library(dependencies=Property(public={"c"}))
),
LibraryPatch(
operation=LibraryOperation.ADD,
sdk_version=LibraryVersion(sdk_16),
library=Library(dependencies=Property(public={"d"}))
),
]
)
library_b = LibraryDescription(
variant=LibraryVariant.OBJECT,
)
library_c = LibraryDescription(
variant=LibraryVariant.OBJECT
)
library_d = LibraryDescription(
variant=LibraryVariant.OBJECT,
library=Library(dependencies=Property(public={"e"}))
)
library_e = LibraryDescription(
variant=LibraryVariant.OBJECT
)
library_f = LibraryDescription(
variant=LibraryVariant.OBJECT
)
all_libraries = {
"a": library_a,
"b": library_b,
"c": library_c,
"d": library_d,
"e": library_e,
"f": library_f
}
supported_sdks = [
sdk_14,
sdk_15,
sdk_16
]
result = libraries_dependencies_per_sdk(
{"a": library_a},
all_libraries,
supported_sdks
)
self.assertEqual(result[sdk_14], set())
self.assertEqual(result[sdk_15], {"a", "b", "c"})
self.assertEqual(result[sdk_16], {"a", "b", "d", "e"})
|
fhirclient/server_tests.py | carolinarsm/client-py | 418 | 11073398 | # -*- coding: utf-8 -*-
import os
import io
import json
import shutil
import server
import unittest
import models.fhirabstractbase as fabst
class TestServer(unittest.TestCase):
def tearDown(self):
if os.path.exists('metadata'):
os.remove('metadata')
def testValidCapabilityStatement(self):
shutil.copyfile('test_metadata_valid.json', 'metadata')
mock = MockServer()
mock.get_capability()
self.assertIsNotNone(mock.auth._registration_uri)
self.assertIsNotNone(mock.auth._authorize_uri)
self.assertIsNotNone(mock.auth._token_uri)
def testStateConservation(self):
shutil.copyfile('test_metadata_valid.json', 'metadata')
mock = MockServer()
self.assertIsNotNone(mock.capabilityStatement)
fhir = server.FHIRServer(None, state=mock.state)
self.assertIsNotNone(fhir.auth._registration_uri)
self.assertIsNotNone(fhir.auth._authorize_uri)
self.assertIsNotNone(fhir.auth._token_uri)
def testInvalidCapabilityStatement(self):
shutil.copyfile('test_metadata_invalid.json', 'metadata')
mock = MockServer()
try:
mock.get_capability()
self.assertTrue(False, "Must have thrown exception")
except fabst.FHIRValidationError as e:
self.assertEqual(4, len(e.errors))
self.assertEqual("date:", str(e.errors[0])[:5])
self.assertEqual("format:", str(e.errors[1])[:7])
self.assertEqual("rest.0:", str(e.errors[2])[:7])
self.assertEqual("operation.1:", str(e.errors[2].errors[0])[:12])
self.assertEqual("definition:", str(e.errors[2].errors[0].errors[0])[:11])
self.assertEqual("Wrong type <class 'dict'>", str(e.errors[2].errors[0].errors[0].errors[0])[:25])
self.assertEqual("security:", str(e.errors[2].errors[1])[:9])
self.assertEqual("service.0:", str(e.errors[2].errors[1].errors[0])[:10])
self.assertEqual("coding.0:", str(e.errors[2].errors[1].errors[0].errors[0])[:9])
self.assertEqual("Superfluous entry \"systems\"", str(e.errors[2].errors[1].errors[0].errors[0].errors[0])[:27])
self.assertEqual("Superfluous entry \"formats\"", str(e.errors[3])[:27])
class MockServer(server.FHIRServer):
""" Reads local files.
"""
def __init__(self):
super().__init__(None, base_uri='https://fhir.smarthealthit.org')
def request_json(self, path, nosign=False):
assert path
with io.open(path, encoding='utf-8') as handle:
return json.load(handle)
return None
|
web/py-collaborator/Database.py | H1d3r/Penetration-Testing-Tools-1 | 1,139 | 11073406 | #!/usr/bin/python3
import pymysql
import pymysql.cursors
import pymysql.converters
from Logger import *
import datetime
DATABASE_LOGGING = False
class Logger:
@staticmethod
def _out(x):
if DATABASE_LOGGING:
sys.stderr.write(str(x) + u'\n')
@staticmethod
def dbg(x):
if DATABASE_LOGGING:
sys.stderr.write(u'[dbg] ' + str(x) + u'\n')
@staticmethod
def out(x):
Logger._out(u'[.] ' + str(x))
@staticmethod
def info(x):
Logger._out(u'[?] ' + str(x))
@staticmethod
def err(x):
if DATABASE_LOGGING:
sys.stderr.write(u'[!] ' + str(x) + u'\n')
@staticmethod
def warn(x):
Logger._out(u'[-] ' + str(x))
@staticmethod
def ok(x):
Logger._out(u'[+] ' + str(x))
class Database:
databaseConnection = None
databaseCursor = None
lastUsedCredentials = {
'host': '',
'user': '',
'password': '',
'db': ''
}
def __init__(self, initialId = 1000):
self.queryId = initialId
pass
def __del__(self):
self.close()
def close(self):
Logger.dbg("Closing database connection.")
if self.databaseConnection: self.databaseConnection.close()
self.databaseConnection = None
def connection(self, host, user, password, db = None):
try:
conv = pymysql.converters.conversions.copy()
conv[246] = float
conv[0] = float
if password:
self.databaseConnection = pymysql.connect(
host=host,
user=user,
passwd=password,
db=db,
cursorclass=pymysql.cursors.DictCursor,
conv = conv
)
else:
self.databaseConnection = pymysql.connect(
host=host,
user=user,
db=db,
cursorclass=pymysql.cursors.DictCursor,
conv=conv
)
#self.databaseConnection.set_character_set('utf8')
Logger.info("Database connection succeeded.")
self.lastUsedCredentials.update({
'host': host,
'user': user,
'password': password,
'db': db
})
return True
except (pymysql.Error, pymysql.Error) as e:
Logger.err("Database connection failed: " + str(e))
return False
def createCursor(self):
if self.databaseCursor:
self.databaseCursor.close()
self.databaseCursor = None
if not self.databaseConnection:
self.reconnect()
self.databaseCursor = self.databaseConnection.cursor()
# self.databaseCursor.execute('SET CHARACTER SET utf8;')
# self.databaseCursor.execute('SET NAMES utf8;')
# self.databaseCursor.execute('SET character_set_connection=utf8;')
# self.databaseCursor.execute('SET GLOBAL connect_timeout=28800;')
# self.databaseCursor.execute('SET GLOBAL wait_timeout=28800;')
# self.databaseCursor.execute('SET GLOBAL interactive_timeout=28800;')
# self.databaseCursor.execute('SET GLOBAL max_allowed_packet=1073741824;')
return self.databaseCursor
def query(self, query, tryAgain = False, params = None):
self.queryId += 1
if len(query)< 100:
Logger.dbg(u'SQL query (id: {}): "{}"'.format(self.queryId, query))
else:
Logger.dbg(u'SQL query (id: {}): "{}...{}"'.format(self.queryId, query[:80], query[-80:]))
try:
self.databaseCursor = self.createCursor()
if params:
self.databaseCursor.execute(query, args = params)
else:
self.databaseCursor.execute(query)
result = self.databaseCursor.fetchall()
num = 0
for row in result:
num += 1
if num > 5: break
if len(str(row)) < 100:
Logger.dbg(u'Query (ID: {}) ("{}") results:\nRow {}.: '.format(self.queryId, str(query), num) + str(row))
else:
Logger.dbg(u'Query (ID: {}) is too long'.format(self.queryId))
return result
except (pymysql.err.InterfaceError) as e:
pass
except (pymysql.Error) as e:
if Database.checkIfReconnectionNeeded(e):
if tryAgain == False:
Logger.err("Query (ID: {}) ('{}') failed. Need to reconnect.".format(self.queryId, query))
self.reconnect()
return self.query(query, True)
Logger.err("Query (ID: {}) ('{}') failed: ".format(self.queryId, query) + str(e))
return False
@staticmethod
def checkIfReconnectionNeeded(error):
try:
return (("MySQL server has gone away" in error[1]) or ('Lost connection to MySQL server' in error[1]))
except (IndexError, TypeError):
return False
def reconnect(self):
Logger.info("Trying to reconnect after failure (last query: {})...".format(self.queryId))
if self.databaseConnection != None:
try:
self.databaseConnection.close()
except:
pass
finally:
self.databaseConnection = None
self.connection(
self.lastUsedCredentials['host'],
self.lastUsedCredentials['user'],
self.lastUsedCredentials['password'],
self.lastUsedCredentials['db']
)
def insert(self, query, tryAgain = False):
'''
Executes SQL query that is an INSERT statement.
params:
query SQL INSERT query
returns:
(boolean Status, int AffectedRows, string Message)
Where:
Status - false on Error, true otherwise
AffectedRows - number of affected rows or error code on failure
Message - error message on failure, None otherwise
'''
self.queryId += 1
if len(query)< 100:
Logger.dbg(u'SQL INSERT query (id: {}): "{}"'.format(self.queryId, query))
else:
Logger.dbg(u'SQL INSERT query (id: {}): "{}...{}"'.format(self.queryId, query[:80], query[-80:]))
assert not query.lower().startswith('select '), "Method insert() must NOT be invoked with SELECT queries!"
try:
self.databaseCursor = self.createCursor()
self.databaseCursor.execute(query)
# Commit new records to the database
self.databaseConnection.commit()
return True, 1, None
except (pymysql.Error, pymysql.Error) as e:
try:
# Rollback introduced changes
self.databaseConnection.rollback()
except: pass
if Database.checkIfReconnectionNeeded(e):
if tryAgain == False:
Logger.err("Insert query (ID: {}) ('{}') failed. Need to reconnect.".format(self.queryId, query))
self.reconnect()
return self.insert(query, True)
Logger.err("Insert Query (ID: {}) ('{}') failed: ".format(self.queryId, query) + str(e))
return False, e.args[0], e.args[1]
def delete(self, query):
assert query.lower().startswith('delete '), "Method delete() must be invoked only with DELETE queries!"
return self.insert(query)
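if __name__ == '__main__':
    # Standalone usage sketch only -- the host and credentials below are
    # placeholders, not configuration taken from this project.
    db = Database()
    if db.connection('127.0.0.1', 'collab_user', 'collab_pass', 'collaborator'):
        rows = db.query('SELECT VERSION() AS version')
        print('Server version: {}'.format(rows))
        db.close()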
|
sourced/ml/tests/tfidf_data.py | vmarkovtsev/ml | 122 | 11073464 | from pyspark import Row
def readonly(_dict: dict):
return frozenset(_dict.items())
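# readonly() freezes a row dict into a hashable frozenset of (key, value) pairs
# so that expected rows can be collected into sets, e.g.
#     readonly({"d": "1", "v": 3}) == frozenset({("d", "1"), ("v", 3)})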
dataset = [
{"d": "1", "t": "1", "v": 3},
{"d": "1", "t": "2", "v": 2},
{"d": "1", "t": "3", "v": 1},
{"d": "2", "t": "1", "v": 4},
{"d": "2", "t": "2", "v": 5},
{"d": "3", "t": "1", "v": 6},
{"d": "4", "t": "1", "v": 4},
{"d": "4", "t": "2", "v": 3},
{"d": "4", "t": "3", "v": 2},
{"d": "4", "t": "4", "v": 1},
] * 10
datasets = [{"uast": [0, 1, 2],
"content": ["method_base", "QueryBuilder", "resolvedFiler"],
"file": [0, 1, 2],
"document": [0, 1, 2],
"lang": ["Python", "Scala", "Go"], },
{"uast": [0, 1, 2],
"content": ["method_base", "QueryBuilder", "resolvedFiler"],
"file": [0, 1, 2],
"document": [0, 0, 0],
"lang": ["Python", "Scala", "Go"], },
{"uast": [0, 1, 2, 3, 4, 5],
"content": ["method_base", "QueryBuilder", "resolvedFiler",
"Flag", "MinhashCuda", "baseConf"],
"file": [0, 1, 2, 3, 4, 5],
"document": [0, 1, 2, 3, 4, 5],
"lang": ["Python", "Scala", "Go", "Java", "C++", "JavaScript"], },
{"uast": [0, 1, 2, 3, 4, 5],
"content": ["method_base", "QueryBuilder", "resolvedFiler",
"Flag", "MinhashCuda", "baseConf"],
"file": [0, 1, 2, 3, 4, 5],
"document": [0, 0, 0, 0, 0, 0],
"lang": ["Python", "Scala", "Go", "Java", "C++", "JavaScript"], },
{"uast": [0, 1, 2, 3, 4, 5],
"content": ["method_base", "QueryBuilder", "resolvedFiler",
"Flag", "MinhashCuda", "baseConf"],
"file": [0, 1, 2, 3, 4, 5],
"document": [0, 0, 0, 1, 1, 1],
"lang": ["Python", "Scala", "Go", "Java", "C++", "JavaScript"], },
]
ids_result = [{Row(token="method_base", token_split="method base"),
Row(token="QueryBuilder", token_split="query builder"),
Row(token="resolvedFiler", token_split="resolved filer"),
},
{Row(token="method_base", token_split="method base"),
Row(token="QueryBuilder", token_split="query builder"),
Row(token="resolvedFiler", token_split="resolved filer"),
},
{Row(token="method_base", token_split="method base"),
Row(token="QueryBuilder", token_split="query builder"),
Row(token="resolvedFiler", token_split="resolved filer"),
Row(token="Flag", token_split="flag"),
Row(token="MinhashCuda", token_split="minhash cuda"),
Row(token="baseConf", token_split="base conf"),
},
{Row(token="method_base", token_split="method base"),
Row(token="QueryBuilder", token_split="query builder"),
Row(token="resolvedFiler", token_split="resolved filer"),
Row(token="Flag", token_split="flag"),
Row(token="MinhashCuda", token_split="minhash cuda"),
Row(token="baseConf", token_split="base conf"),
},
{Row(token="method_base", token_split="method base"),
Row(token="QueryBuilder", token_split="query builder"),
Row(token="resolvedFiler", token_split="resolved filer"),
Row(token="Flag", token_split="flag"),
Row(token="MinhashCuda", token_split="minhash cuda"),
Row(token="baseConf", token_split="base conf"),
},
]
ids_split_result = [{Row(token="method_base", token_split="method base"),
Row(token="QueryBuilder", token_split="query builder"),
Row(token="resolvedFiler", token_split="resolved filer"),
},
{Row(token="method_base", token_split="method base"),
Row(token="QueryBuilder", token_split="query builder"),
Row(token="resolvedFiler", token_split="resolved filer"),
},
{Row(token="method_base", token_split="method base"),
Row(token="QueryBuilder", token_split="query builder"),
Row(token="resolvedFiler", token_split="resolved filer"),
Row(token="Minhash<PASSWORD>", token_split="minhash cuda"),
Row(token="baseConf", token_split="base conf"),
},
{Row(token="method_base", token_split="method base"),
Row(token="QueryBuilder", token_split="query builder"),
Row(token="resolvedFiler", token_split="resolved filer"),
Row(token="Minhash<PASSWORD>", token_split="minhash cuda"),
Row(token="baseConf", token_split="base conf"),
},
{Row(token="method_base", token_split="method base"),
Row(token="QueryBuilder", token_split="query builder"),
Row(token="resolvedFiler", token_split="resolved filer"),
Row(token="MinhashCuda", token_split="minhash cuda"),
Row(token="baseConf", token_split="base conf"),
},
]
ids_split_idfreq_result = [{Row(num_files=1, num_occ=1, num_repos=1,
token="method_base", token_split="method base"),
Row(num_files=1, num_occ=1, num_repos=1,
token="QueryBuilder", token_split="query builder"),
Row(num_files=1, num_occ=1, num_repos=1,
token="resolvedFiler", token_split="resolved filer"),
},
{Row(num_files=1, num_occ=1, num_repos=1,
token="method_base", token_split="method base"),
Row(num_files=1, num_occ=1, num_repos=1,
token="QueryBuilder", token_split="query builder"),
Row(num_files=1, num_occ=1, num_repos=1,
token="resolvedFiler", token_split="resolved filer"),
},
{Row(num_files=1, num_occ=1, num_repos=1,
token="method_base", token_split="method base"),
Row(num_files=1, num_occ=1, num_repos=1,
token="QueryBuilder", token_split="query builder"),
Row(num_files=1, num_occ=1, num_repos=1,
token="resolvedFiler", token_split="resolved filer"),
Row(num_files=1, num_occ=1, num_repos=1,
token="MinhashCuda", token_split="minhash cuda"),
Row(num_files=1, num_occ=1, num_repos=1,
token="baseConf", token_split="base conf"),
},
{Row(num_files=1, num_occ=1, num_repos=1,
token="method_base", token_split="method base"),
Row(num_files=1, num_occ=1, num_repos=1,
token="QueryBuilder", token_split="query builder"),
Row(num_files=1, num_occ=1, num_repos=1,
token="resolvedFiler", token_split="resolved filer"),
Row(num_files=1, num_occ=1, num_repos=1,
token="MinhashCuda", token_split="minhash cuda"),
Row(num_files=1, num_occ=1, num_repos=1,
token="baseConf", token_split="base conf"),
},
{Row(num_files=1, num_occ=1, num_repos=1,
token="method_base", token_split="method base"),
Row(num_files=1, num_occ=1, num_repos=1,
token="QueryBuilder", token_split="query builder"),
Row(num_files=1, num_occ=1, num_repos=1,
token="resolvedFiler", token_split="resolved filer"),
Row(num_files=1, num_occ=1, num_repos=1,
token="MinhashCuda", token_split="minhash cuda"),
Row(num_files=1, num_occ=1, num_repos=1,
token="baseConf", token_split="base conf"),
},
]
term_freq_result = {
readonly({"d": "1", "t": "1", "v": 30}),
readonly({"d": "1", "t": "2", "v": 20}),
readonly({"d": "1", "t": "3", "v": 10}),
readonly({"d": "2", "t": "1", "v": 40}),
readonly({"d": "2", "t": "2", "v": 50}),
readonly({"d": "3", "t": "1", "v": 60}),
readonly({"d": "4", "t": "1", "v": 40}),
readonly({"d": "4", "t": "2", "v": 30}),
readonly({"d": "4", "t": "3", "v": 20}),
readonly({"d": "4", "t": "4", "v": 10}),
}
doc_freq_result = {
"1": 4,
"2": 3,
"3": 2,
"4": 1,
}
|
caffe-grid/src/main/python/com/yahoo/ml/caffe/ReflectionUtil.py | jenniew/IntelCaffeOnSpark_mirror | 1,436 | 11073482 | '''
Copyright 2016 Yahoo Inc.
Licensed under the terms of the Apache 2.0 license.
Please see LICENSE file in the project root for terms.
This module contains various routines involving Java's class objects for convenience.
'''
import string
'''
There is a distinction between py4j's "Java Classes" and Java's "Class Objects".
py4j's "Java Classes" are what is returned by finding the Java class in the jvm,
i.e., jvm.java.lang.Object.
In contrast, Java's "Class Objects" are what is returned by calling getClass() on an
instance, i.e., jvm.java.lang.Object().getClass() or java.lang.Class.forName(<Class name>)
Both types are frequently used: the former makes arrays of that type, allows
new instances of the class to be made, and allows static methods of that class to be called,
while the latter allows for the use of reflection.
'''
'''
Returns the Java class object for the class of the given name.
The name may not be abbreviated by stripping its package name.
'''
def javaClassObject(name):
return jvm.java.lang.Class.forName(name)
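# For example, javaClassObject("java.lang.String") returns the Class object for
# String (this assumes the module-level py4j `jvm` gateway has been initialized
# elsewhere before these helpers are called).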
'''
Returns the Java class object corresponding to the given Scala type.
'''
def javaClassFromScala(scalaType,mirror):
return mirror.runtimeClass(scalaType)
'''
Returns the class object for java.lang.Object.
'''
def javaObjectClass():
return javaClassObject("java.lang.Object")
'''
Returns the class object for java.lang.Class.
'''
def javaClassClass():
return javaClassObject("java.lang.Class")
'''
Returns an empty array of Object.
'''
def emptyArray():
return gateway.new_array(jvm.java.lang.Object,0)
'''
Returns an unassigned Object array with length num.
'''
def objectArray(num):
return gateway.new_array(jvm.java.lang.Object,num)
'''
Returns an array of java class objects from a list of strs
of the full names of the classes.
'''
def classObjectArray(classNameList):
arr = gateway.new_array(jvm.java.lang.Class,len(classNameList))
for i,name in enumerate(classNameList):
arr[i]=javaClassObject(name)
return arr
'''
Returns a class object array in which each element is assigned to
the class for java.lang.Object.
The array has length num.
'''
def objectClassArray(num=1):
arr = gateway.new_array(jvm.java.lang.Class,num)
for i in range(num):
arr[i] = javaObjectClass()
return arr
'''
Returns True if the argument is a Java class object and False otherwise.
'''
def isClass(obj):
return javaClassClass().isAssignableFrom(obj.getClass())
'''
Returns True if the given Java class object represents
an instantiable class and False otherwise.
'''
def isInstantiable(javaClass):
return not (jvm.java.lang.reflect.Modifier.isAbstract(javaClass.getModifiers()) or javaClass.isInterface())
'''
Returns True if the given Java type represents
a parameterized type and False otherwise.
'''
def isParameterizedType(javaType):
return javaClassObject("java.lang.reflect.ParameterizedType").isAssignableFrom(javaType.getClass())
'''
Returns True if the given Java type represents
a type variable and False otherwise.
'''
def isTypeVariable(javaType):
return javaClassObject("java.lang.reflect.TypeVariable").isAssignableFrom(javaType.getClass())
'''
Returns True if the given Java type represents
a generic array type and False otherwise.
'''
def isGenericArrayType(javaType):
return javaClassObject("java.lang.reflect.GenericArrayType").isAssignableFrom(javaType.getClass())
'''
Returns True if the given Java method takes a variable number of arguments and
False otherwise.
'''
def isVarArgs(method):
try:
return method.isVarArgs()
except:
return False
'''
Returns the Scala singleton Object for the class
named className.
'''
def getScalaSingleton(className):
uni=jvm.scala.reflect.runtime.package.universe()
rtm=uni.runtimeMirror(javaClassObject(className+'$').getClassLoader())
clsm=rtm.classSymbol(javaClassObject(className+'$'))
modSym=clsm.module()
modMir=rtm.reflectModule(modSym)
return modMir.instance()
'''
Returns the class tag of the given class.
'''
def getClassTag(javaClass):
return getScalaSingleton("scala.reflect.ClassTag").apply(javaClass)
'''
Returns a Scala List of the parameter types of a Scala method.
'''
def getParameterTypes(scalaMethodSymbol):
retList=[]
it1=scalaMethodSymbol.paramss().toIterator()
while it1.hasNext():
it2=it1.next().toIterator()
while it2.hasNext():
signature=it2.next().typeSignature()
if str(signature)[-1]=='*':
retList.append(getGenericParameterTypes(signature).head())
else:
retList.append(signature)
return retList
'''
Returns a Scala List of the generic parameter types of a Scala type.
'''
def getGenericParameterTypes(scalaType):
return jvm.com.yahoo.ml.caffe.python.General.getTypeParameters(scalaType)
'''
Returns the global Scala ExecutionContext.
'''
def getScalaExecutionContext():
className="scala.concurrent.ExecutionContext"
return javaMethodObject(className+'$',"global",classObjectArray([])).invoke(getScalaSingleton(className),emptyArray())
'''
Returns the Java method object of name methodName with the class named className taking arguments
specified by a list of the names of the argument classes.
'''
def javaMethodObject(className,methodName,argumentClassNameList=["java.lang.Object"]):
return javaClassObject(className).getMethod(methodName,classObjectArray(argumentClassNameList))
'''
Returns a Java method of name methodName of the class className
which has no arguments.
'''
def arglessJavaMethodObject(className,methodName):
return javaMethodObject(className,methodName,[])
'''
Returns the Py4J JavaClass object equivalent to the Java class
object javaClass.
'''
def javaClassToPy4JClass(javaClass):
return jvm.__getattr__(javaClass.getName())
'''
Returns true is classObj most-likely represents a Scala Tuple class.
Due to some Scala magic, it is difficult to know with absolute certainty
without using unwieldy code, though this method should be correct in all cases
that are not constructs used to contradict it.
'''
def isTupleClass(classObj):
name = classObj.getName()
dollarSign=name.find('$')
if dollarSign != -1:
name=name[0:name.find('$')]
if len(name) > 11 and name[0:11]=="scala.Tuple":
if len(name) == 12 and name[-1] in string.digits:
return True
if len(name) == 13:
return (name[-2]=='1' and name[-1] in string.digits) or (name[-2]=='2' and name[-1] in string.digits and int(name[-1]) in range(3))
return False
|
src/cryptography/hazmat/primitives/keywrap.py | gorgiaxx/cryptography | 4,492 | 11073488 | <gh_stars>1000+
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import struct
import typing
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.ciphers.modes import ECB
from cryptography.hazmat.primitives.constant_time import bytes_eq
def _wrap_core(
wrapping_key: bytes,
a: bytes,
r: typing.List[bytes],
) -> bytes:
# RFC 3394 Key Wrap - 2.2.1 (index method)
encryptor = Cipher(AES(wrapping_key), ECB()).encryptor()
n = len(r)
for j in range(6):
for i in range(n):
# every encryption operation is a discrete 16 byte chunk (because
# AES has a 128-bit block size) and since we're using ECB it is
# safe to reuse the encryptor for the entire operation
b = encryptor.update(a + r[i])
# pack/unpack are safe as these are always 64-bit chunks
a = struct.pack(
">Q", struct.unpack(">Q", b[:8])[0] ^ ((n * j) + i + 1)
)
r[i] = b[-8:]
assert encryptor.finalize() == b""
return a + b"".join(r)
def aes_key_wrap(
wrapping_key: bytes,
key_to_wrap: bytes,
backend: typing.Any = None,
) -> bytes:
if len(wrapping_key) not in [16, 24, 32]:
raise ValueError("The wrapping key must be a valid AES key length")
if len(key_to_wrap) < 16:
raise ValueError("The key to wrap must be at least 16 bytes")
if len(key_to_wrap) % 8 != 0:
raise ValueError("The key to wrap must be a multiple of 8 bytes")
a = b"\xa6\xa6\xa6\xa6\xa6\xa6\xa6\xa6"
r = [key_to_wrap[i : i + 8] for i in range(0, len(key_to_wrap), 8)]
return _wrap_core(wrapping_key, a, r)
def _unwrap_core(
wrapping_key: bytes,
a: bytes,
r: typing.List[bytes],
) -> typing.Tuple[bytes, typing.List[bytes]]:
# Implement RFC 3394 Key Unwrap - 2.2.2 (index method)
decryptor = Cipher(AES(wrapping_key), ECB()).decryptor()
n = len(r)
for j in reversed(range(6)):
for i in reversed(range(n)):
# pack/unpack are safe as these are always 64-bit chunks
atr = (
struct.pack(
">Q", struct.unpack(">Q", a)[0] ^ ((n * j) + i + 1)
)
+ r[i]
)
# every decryption operation is a discrete 16 byte chunk so
# it is safe to reuse the decryptor for the entire operation
b = decryptor.update(atr)
a = b[:8]
r[i] = b[-8:]
assert decryptor.finalize() == b""
return a, r
def aes_key_wrap_with_padding(
wrapping_key: bytes,
key_to_wrap: bytes,
backend: typing.Any = None,
) -> bytes:
if len(wrapping_key) not in [16, 24, 32]:
raise ValueError("The wrapping key must be a valid AES key length")
aiv = b"\xA6\x59\x59\xA6" + struct.pack(">i", len(key_to_wrap))
# pad the key to wrap if necessary
pad = (8 - (len(key_to_wrap) % 8)) % 8
key_to_wrap = key_to_wrap + b"\x00" * pad
if len(key_to_wrap) == 8:
# RFC 5649 - 4.1 - exactly 8 octets after padding
encryptor = Cipher(AES(wrapping_key), ECB()).encryptor()
b = encryptor.update(aiv + key_to_wrap)
assert encryptor.finalize() == b""
return b
else:
r = [key_to_wrap[i : i + 8] for i in range(0, len(key_to_wrap), 8)]
return _wrap_core(wrapping_key, aiv, r)
def aes_key_unwrap_with_padding(
wrapping_key: bytes,
wrapped_key: bytes,
backend: typing.Any = None,
) -> bytes:
if len(wrapped_key) < 16:
raise InvalidUnwrap("Must be at least 16 bytes")
if len(wrapping_key) not in [16, 24, 32]:
raise ValueError("The wrapping key must be a valid AES key length")
if len(wrapped_key) == 16:
# RFC 5649 - 4.2 - exactly two 64-bit blocks
decryptor = Cipher(AES(wrapping_key), ECB()).decryptor()
out = decryptor.update(wrapped_key)
assert decryptor.finalize() == b""
a = out[:8]
data = out[8:]
n = 1
else:
r = [wrapped_key[i : i + 8] for i in range(0, len(wrapped_key), 8)]
encrypted_aiv = r.pop(0)
n = len(r)
a, r = _unwrap_core(wrapping_key, encrypted_aiv, r)
data = b"".join(r)
# 1) Check that MSB(32,A) = A65959A6.
# 2) Check that 8*(n-1) < LSB(32,A) <= 8*n. If so, let
# MLI = LSB(32,A).
# 3) Let b = (8*n)-MLI, and then check that the rightmost b octets of
# the output data are zero.
(mli,) = struct.unpack(">I", a[4:])
b = (8 * n) - mli
if (
not bytes_eq(a[:4], b"\xa6\x59\x59\xa6")
or not 8 * (n - 1) < mli <= 8 * n
or (b != 0 and not bytes_eq(data[-b:], b"\x00" * b))
):
raise InvalidUnwrap()
if b == 0:
return data
else:
return data[:-b]
def aes_key_unwrap(
wrapping_key: bytes,
wrapped_key: bytes,
backend: typing.Any = None,
) -> bytes:
if len(wrapped_key) < 24:
raise InvalidUnwrap("Must be at least 24 bytes")
if len(wrapped_key) % 8 != 0:
raise InvalidUnwrap("The wrapped key must be a multiple of 8 bytes")
if len(wrapping_key) not in [16, 24, 32]:
raise ValueError("The wrapping key must be a valid AES key length")
aiv = b"\xa6\xa6\xa6\xa6\xa6\xa6\xa6\xa6"
r = [wrapped_key[i : i + 8] for i in range(0, len(wrapped_key), 8)]
a = r.pop(0)
a, r = _unwrap_core(wrapping_key, a, r)
if not bytes_eq(a, aiv):
raise InvalidUnwrap()
return b"".join(r)
class InvalidUnwrap(Exception):
pass
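if __name__ == "__main__":
    # Minimal round-trip sketch (not part of the library API): wrap a random
    # 128-bit key under a random key-encryption key and verify the unwrap.
    import os

    kek = os.urandom(16)
    key = os.urandom(16)
    wrapped = aes_key_wrap(kek, key)
    assert aes_key_unwrap(kek, wrapped) == key

    # The padded variants accept key material that is not a multiple of 8 bytes.
    wrapped_padded = aes_key_wrap_with_padding(kek, b"short-key")
    assert aes_key_unwrap_with_padding(kek, wrapped_padded) == b"short-key"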
|
tests/pup/motors/busy.py | NStrijbosch/pybricks-micropython | 115 | 11073489 | # SPDX-License-Identifier: MIT
# Copyright (c) 2020 The Pybricks Authors
"""
Hardware Module: 1
Description: Checks that EBUSY is raised when changing settings while holding.
"""
from pybricks.pupdevices import Motor
from pybricks.parameters import Port
from uerrno import EBUSY
# Initialize the motor.
motor = Motor(Port.A)
# Rotate the motor to base position and hold it there.
motor.run_target(500, 0)
# Try to change a setting. This should raise an error because the motor is busy.
exception = None
try:
    motor.control.limits(duty=50)
except Exception as e:
    exception = e
# Assert that we got the expected exception.
assert exception is not None, "Expected an exception, but none was raised."
assert exception.args[0] == EBUSY, "Did not raise expected exception."
|
InvenTree/company/migrations/0020_auto_20200413_0839.py | ArakniD/InvenTree | 656 | 11073536 | <reponame>ArakniD/InvenTree<gh_stars>100-1000
# Generated by Django 2.2.10 on 2020-04-13 08:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('company', '0019_auto_20200413_0642'),
]
operations = [
migrations.AlterField(
model_name='supplierpart',
name='supplier',
field=models.ForeignKey(help_text='Select supplier', limit_choices_to={'is_supplier': True}, on_delete=django.db.models.deletion.CASCADE, related_name='supplied_parts', to='company.Company'),
),
]
|
alex/applications/wsrouter/run.py | cifkao/alex | 184 | 11073559 | if __name__ == '__main__':
import autopath
from wsrouter import WSRouter
def main(addr, port, entry_timeout):
router = WSRouter(addr, port, entry_timeout)
router.run()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('addr')
parser.add_argument('port', type=int)
parser.add_argument('--entry_timeout', type=int, default=10)
args = parser.parse_args()
main(**vars(args)) |
lib/models/simple_convnet.py | unluckydan/deep_metric_learning | 169 | 11073560 | <filename>lib/models/simple_convnet.py
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 21 19:44:37 2017
@author: sakurai
"""
import chainer
import chainer.functions as F
import chainer.links as L
class SimpleConvnet(chainer.Chain):
def __init__(self, out_dim):
super(SimpleConvnet, self).__init__(
conv1=L.Convolution2D(3, 50, 3),
bn_conv1=L.BatchNormalization(50),
conv21=L.Convolution2D(50, 100, 3),
bn_conv21=L.BatchNormalization(100),
conv22=L.Convolution2D(100, 100, 1),
bn_conv22=L.BatchNormalization(100),
conv31=L.Convolution2D(100, 200, 3),
bn_conv31=L.BatchNormalization(200),
conv32=L.Convolution2D(200, 200, 3),
bn_conv32=L.BatchNormalization(200),
conv41=L.Convolution2D(200, 400, 3),
bn_conv41=L.BatchNormalization(400),
conv42=L.Convolution2D(400, 400, 1),
bn_conv42=L.BatchNormalization(400),
conv5=L.Convolution2D(400, 400, 1),
bn_conv5=L.BatchNormalization(400),
conv6=L.Convolution2D(400, 400, 1),
bn_conv6=L.BatchNormalization(400),
linear1=L.Linear(400, 400),
bn_linear1=L.BatchNormalization(400),
linear2=L.Linear(400, out_dim)
)
def __call__(self, x, train=False):
h = self.conv1(x)
h = self.bn_conv1(h, test=not train)
h = F.max_pooling_2d(h, 2)
h = F.relu(h)
h = self.conv21(h)
h = self.bn_conv21(h, test=not train)
h = F.relu(h)
h = self.conv22(h)
h = self.bn_conv22(h, test=not train)
h = F.max_pooling_2d(h, 2)
h = F.relu(h)
h = self.conv31(h)
h = self.bn_conv31(h, test=not train)
h = F.relu(h)
h = self.conv32(h)
h = self.bn_conv32(h, test=not train)
h = F.max_pooling_2d(h, 2)
h = F.relu(h)
h = self.conv41(h)
h = self.bn_conv41(h, test=not train)
h = F.relu(h)
h = self.conv42(h)
h = self.bn_conv42(h, test=not train)
h = F.max_pooling_2d(h, 2)
h = F.relu(h)
h = self.conv5(h)
h = self.bn_conv5(h, test=not train)
h = F.relu(h)
h = self.conv6(h)
h = self.bn_conv6(h, test=not train)
h = F.relu(h)
h = F.average_pooling_2d(h, h.shape[2:])
h = self.linear1(h)
h = self.bn_linear1(h, test=not train)
# h = F.dropout(h, ratio=0.5, train=train)
h = F.relu(h)
h = self.linear2(h)
return h
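# A quick forward-pass sketch (illustrative addition; assumes the chainer release this
# file targets, where BatchNormalization still accepts the ``test`` keyword):
if __name__ == '__main__':
    import numpy as np
    model = SimpleConvnet(out_dim=64)
    x = np.random.rand(2, 3, 64, 64).astype(np.float32)  # dummy RGB batch
    y = model(x, train=False)
    print(y.shape)  # expected: (2, 64)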
|
scripts/worker.py | varshar16/teuthology | 117 | 11073562 | import argparse
import teuthology.worker
def main():
teuthology.worker.main(parse_args())
def parse_args():
parser = argparse.ArgumentParser(description="""
Grab jobs from a beanstalk queue and run the teuthology tests they
describe. One job is run at a time.
""")
parser.add_argument(
'-v', '--verbose',
action='store_true', default=None,
help='be more verbose',
)
parser.add_argument(
'--archive-dir',
metavar='DIR',
help='path under which to archive results',
required=True,
)
parser.add_argument(
'-l', '--log-dir',
help='path in which to store logs',
required=True,
)
parser.add_argument(
'-t', '--tube',
help='which beanstalk tube to read jobs from',
required=True,
)
return parser.parse_args()
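# Example invocation (illustrative; the console-script name and the paths are
# assumptions, while the flags themselves come from parse_args above):
#   teuthology-worker -v --archive-dir /home/teuthworker/archive \
#       --log-dir /home/teuthworker/log --tube mytube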
|
tests/modules/span_extractors/endpoint_span_extractor_test.py | MSLars/allennlp | 11,433 | 11073568 | import numpy
import torch
from allennlp.modules.span_extractors import SpanExtractor, EndpointSpanExtractor
from allennlp.common.params import Params
from allennlp.nn.util import batched_index_select
class TestEndpointSpanExtractor:
def test_endpoint_span_extractor_can_build_from_params(self):
params = Params(
{
"type": "endpoint",
"input_dim": 7,
"num_width_embeddings": 5,
"span_width_embedding_dim": 3,
}
)
extractor = SpanExtractor.from_params(params)
assert isinstance(extractor, EndpointSpanExtractor)
assert extractor.get_output_dim() == 17 # 2 * input_dim + span_width_embedding_dim
def test_correct_sequence_elements_are_embedded(self):
sequence_tensor = torch.randn([2, 5, 7])
        # Concatenate start and end points together to form our representation.
extractor = EndpointSpanExtractor(7, "x,y")
indices = torch.LongTensor([[[1, 3], [2, 4]], [[0, 2], [3, 4]]])
span_representations = extractor(sequence_tensor, indices)
assert list(span_representations.size()) == [2, 2, 14]
assert extractor.get_output_dim() == 14
assert extractor.get_input_dim() == 7
start_indices, end_indices = indices.split(1, -1)
# We just concatenated the start and end embeddings together, so
# we can check they match the original indices if we split them apart.
start_embeddings, end_embeddings = span_representations.split(7, -1)
correct_start_embeddings = batched_index_select(sequence_tensor, start_indices.squeeze())
correct_end_embeddings = batched_index_select(sequence_tensor, end_indices.squeeze())
numpy.testing.assert_array_equal(
start_embeddings.data.numpy(), correct_start_embeddings.data.numpy()
)
numpy.testing.assert_array_equal(
end_embeddings.data.numpy(), correct_end_embeddings.data.numpy()
)
def test_masked_indices_are_handled_correctly(self):
sequence_tensor = torch.randn([2, 5, 7])
        # concatenate start and end points together to form our representation.
extractor = EndpointSpanExtractor(7, "x,y")
indices = torch.LongTensor([[[1, 3], [2, 4]], [[0, 2], [3, 4]]])
span_representations = extractor(sequence_tensor, indices)
# Make a mask with the second batch element completely masked.
indices_mask = torch.tensor([[True, True], [False, False]])
span_representations = extractor(sequence_tensor, indices, span_indices_mask=indices_mask)
start_embeddings, end_embeddings = span_representations.split(7, -1)
start_indices, end_indices = indices.split(1, -1)
correct_start_embeddings = batched_index_select(
sequence_tensor, start_indices.squeeze()
).data
# Completely masked second batch element, so it should all be zero.
correct_start_embeddings[1, :, :].fill_(0)
correct_end_embeddings = batched_index_select(sequence_tensor, end_indices.squeeze()).data
correct_end_embeddings[1, :, :].fill_(0)
numpy.testing.assert_array_equal(
start_embeddings.data.numpy(), correct_start_embeddings.numpy()
)
numpy.testing.assert_array_equal(
end_embeddings.data.numpy(), correct_end_embeddings.numpy()
)
def test_masked_indices_are_handled_correctly_with_exclusive_indices(self):
sequence_tensor = torch.randn([2, 5, 8])
        # concatenate start and end points together to form our representation
# for both the forward and backward directions.
extractor = EndpointSpanExtractor(8, "x,y", use_exclusive_start_indices=True)
indices = torch.LongTensor([[[1, 3], [2, 4]], [[0, 2], [0, 1]]])
sequence_mask = torch.tensor(
[[True, True, True, True, True], [True, True, True, False, False]]
)
span_representations = extractor(sequence_tensor, indices, sequence_mask=sequence_mask)
# We just concatenated the start and end embeddings together, so
# we can check they match the original indices if we split them apart.
start_embeddings, end_embeddings = span_representations.split(8, -1)
correct_start_indices = torch.LongTensor([[0, 1], [-1, -1]])
# These indices should be -1, so they'll be replaced with a sentinel. Here,
# we'll set them to a value other than -1 so we can index select the indices and
# replace them later.
correct_start_indices[1, 0] = 1
correct_start_indices[1, 1] = 1
correct_end_indices = torch.LongTensor([[3, 4], [2, 1]])
correct_start_embeddings = batched_index_select(
sequence_tensor.contiguous(), correct_start_indices
)
        # This element had sequence_tensor index of 0, so its exclusive index is the start sentinel.
correct_start_embeddings[1, 0] = extractor._start_sentinel.data
correct_start_embeddings[1, 1] = extractor._start_sentinel.data
numpy.testing.assert_array_equal(
start_embeddings.data.numpy(), correct_start_embeddings.data.numpy()
)
correct_end_embeddings = batched_index_select(
sequence_tensor.contiguous(), correct_end_indices
)
numpy.testing.assert_array_equal(
end_embeddings.data.numpy(), correct_end_embeddings.data.numpy()
)
|
models/nndistance/functions/nnd.py | GuillaumeDufau/3D-point-capsule-networks | 283 | 11073603 | # functions/add.py
import torch
from torch.autograd import Function
#from _ext import my_lib
import my_lib_cuda as my_lib
class NNDFunction(Function):
def forward(self, xyz1, xyz2):
batchsize, n, _ = xyz1.size()
_, m, _ = xyz2.size()
# self.xyz1 = xyz1[...]
# self.xyz2 = xyz2[...]
dist1 = torch.zeros(batchsize, n)
dist2 = torch.zeros(batchsize, m)
idx1 = torch.zeros(batchsize, n).type(torch.IntTensor)
idx2 = torch.zeros(batchsize, m).type(torch.IntTensor)
if not xyz1.is_cuda:
my_lib.nnd_forward(xyz1, xyz2, dist1, dist2, idx1, idx2)
else:
dist1 = dist1.cuda()
dist2 = dist2.cuda()
idx1 = idx1.cuda()
idx2 = idx2.cuda()
my_lib.nnd_forward_cuda(xyz1, xyz2, dist1, dist2, idx1, idx2)
# self.dist1 = dist1
# self.dist2 = dist2
#print(batchsize, n, m)
self.save_for_backward(xyz1,xyz2,dist1,dist2,idx1,idx2)
return dist1, dist2
def backward(self, graddist1, graddist2):
#print(self.idx1, self.idx2)
xyz1,xyz2,dist1,dist2,idx1,idx2 = self.saved_tensors
graddist1 = graddist1.contiguous()
graddist2 = graddist2.contiguous()
gradxyz1 = torch.zeros(xyz1.size())
gradxyz2 = torch.zeros(xyz2.size())
if not graddist1.is_cuda:
my_lib.nnd_backward(xyz1, xyz2, gradxyz1, gradxyz2, graddist1, graddist2, idx1, idx2)
else:
gradxyz1 = gradxyz1.cuda()
gradxyz2 = gradxyz2.cuda()
my_lib.nnd_backward_cuda(xyz1, xyz2, gradxyz1, gradxyz2, graddist1, graddist2, idx1, idx2)
# print(gradxyz1)
# print(gradxyz2)
# print(dist1)
# print(dist2)
# print(idx1)
# print(idx2)
return gradxyz1, gradxyz2 |
airsenal/__init__.py | Abelarm/AIrsenal | 144 | 11073607 | <reponame>Abelarm/AIrsenal
"""
___init__.py for airsenal
"""
import os
import tempfile
# AIrsenal package version.
__version__ = "1.2.0"
# Cross-platform temporary directory
TMPDIR = "/tmp/" if os.name == "posix" else tempfile.gettempdir()
|
torchsat_imc/models/utils.py | Exdenta/torchsat | 316 | 11073615 |
from .classification.vgg import vgg11, vgg11_bn, vgg13, vgg13_bn, vgg16, vgg16_bn, vgg19_bn, vgg19
from .classification.resnet import resnet18, resnet34, resnet50, resnet101, resnet152, resnext50_32x4d, resnext101_32x8d, wide_resnet50_2, wide_resnet101_2
from .classification.densenet import densenet121, densenet169, densenet201
from .classification.inception import inception_v3
from .classification.mobilenet import mobilenet_v2
from .classification.efficientnet import efficientnet_b0, efficientnet_b1, efficientnet_b2, efficientnet_b3, efficientnet_b4, efficientnet_b5, efficientnet_b6, efficientnet_b7
from .classification.resnest import resnest50, resnest101, resnest200, resnest269
from .segmentation.unet import unet34, unet101, unet152
__all__ = ["get_model"]
models = {
'vgg11': vgg11,
'vgg11_bn': vgg11_bn,
'vgg13': vgg13,
'vgg13_bn': vgg13_bn,
'vgg16': vgg16,
'vgg16_bn': vgg16_bn,
'vgg19_bn': vgg19_bn,
'vgg19': vgg19,
'resnet18': resnet18,
'resnet34': resnet34,
'resnet50': resnet50,
'resnet101': resnet101,
'resnet152': resnet152,
'resnext50_32x4d': resnext50_32x4d,
'resnext101_32x8d': resnext101_32x8d,
'wide_resnet50_2': wide_resnet50_2,
'wide_resnet101_2': wide_resnet101_2,
'mobilenet_v2': mobilenet_v2,
'inception_v3': inception_v3,
'densenet121': densenet121,
'densenet169': densenet169,
'densenet201': densenet201,
'efficientnet_b0': efficientnet_b0,
'efficientnet_b1': efficientnet_b1,
'efficientnet_b2': efficientnet_b2,
'efficientnet_b3': efficientnet_b3,
'efficientnet_b4': efficientnet_b4,
'efficientnet_b5': efficientnet_b5,
'efficientnet_b6': efficientnet_b6,
'efficientnet_b7': efficientnet_b7,
'resnest50': resnest50,
'resnest101': resnest101,
'resnest200': resnest200,
'resnest269': resnest269,
'unet34': unet34,
'unet101': unet101,
'unet152': unet152,
}
def get_model(name: str, num_classes: int, **kwargs):
print(kwargs)
if name.lower() not in models:
raise ValueError("no model named {}, should be one of {}".format(name, ' '.join(models)))
return models.get(name.lower())(num_classes, **kwargs)
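# A minimal lookup sketch (illustrative addition): fetch a registered architecture by
# name; the printed class name depends on the underlying torchsat implementation.
if __name__ == '__main__':
    net = get_model('resnet50', num_classes=10)
    print(type(net).__name__)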
|
app/tornado_handlers/browse.py | magate/flight_review | 128 | 11073640 | <reponame>magate/flight_review<filename>app/tornado_handlers/browse.py
"""
Tornado handler for the browse page
"""
from __future__ import print_function
import collections
import sys
import os
from datetime import datetime
import json
import sqlite3
import tornado.web
# this is needed for the following imports
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../plot_app'))
from config import get_db_filename, get_overview_img_filepath
from db_entry import DBData, DBDataGenerated
from helper import flight_modes_table, get_airframe_data, html_long_word_force_break
#pylint: disable=relative-beyond-top-level,too-many-statements
from .common import get_jinja_env, get_generated_db_data_from_log
BROWSE_TEMPLATE = 'browse.html'
#pylint: disable=abstract-method
class BrowseDataRetrievalHandler(tornado.web.RequestHandler):
""" Ajax data retrieval handler """
def get(self, *args, **kwargs):
""" GET request """
search_str = self.get_argument('search[value]', '').lower()
order_ind = int(self.get_argument('order[0][column]'))
order_dir = self.get_argument('order[0][dir]', '').lower()
data_start = int(self.get_argument('start'))
data_length = int(self.get_argument('length'))
draw_counter = int(self.get_argument('draw'))
json_output = dict()
json_output['draw'] = draw_counter
# get the logs (but only the public ones)
con = sqlite3.connect(get_db_filename(), detect_types=sqlite3.PARSE_DECLTYPES)
cur = con.cursor()
sql_order = ' ORDER BY Date DESC'
ordering_col = ['',#table row number
'Logs.Date',
'',#Overview - img
'Logs.Description',
'LogsGenerated.MavType',
'',#Airframe - not from DB
'LogsGenerated.Hardware',
'LogsGenerated.Software',
'LogsGenerated.Duration',
'LogsGenerated.StartTime',
'',#Rating
'LogsGenerated.NumLoggedErrors',
'' #FlightModes
]
if ordering_col[order_ind] != '':
sql_order = ' ORDER BY ' + ordering_col[order_ind]
if order_dir == 'desc':
sql_order += ' DESC'
cur.execute('SELECT Logs.Id, Logs.Date, '
' Logs.Description, Logs.WindSpeed, '
' Logs.Rating, Logs.VideoUrl, '
' LogsGenerated.* '
'FROM Logs '
' LEFT JOIN LogsGenerated on Logs.Id=LogsGenerated.Id '
'WHERE Logs.Public = 1 AND NOT Logs.Source = "CI" '
+sql_order)
# pylint: disable=invalid-name
Columns = collections.namedtuple("Columns", "columns search_only_columns")
def get_columns_from_tuple(db_tuple, counter):
""" load the columns (list of strings) from a db_tuple
"""
db_data = DBDataJoin()
log_id = db_tuple[0]
log_date = db_tuple[1].strftime('%Y-%m-%d')
db_data.description = db_tuple[2]
db_data.feedback = ''
db_data.type = ''
db_data.wind_speed = db_tuple[3]
db_data.rating = db_tuple[4]
db_data.video_url = db_tuple[5]
generateddata_log_id = db_tuple[6]
if log_id != generateddata_log_id:
print('Join failed, loading and updating data')
db_data_gen = get_generated_db_data_from_log(log_id, con, cur)
if db_data_gen is None:
return None
db_data.add_generated_db_data_from_log(db_data_gen)
else:
db_data.duration_s = db_tuple[7]
db_data.mav_type = db_tuple[8]
db_data.estimator = db_tuple[9]
db_data.sys_autostart_id = db_tuple[10]
db_data.sys_hw = db_tuple[11]
db_data.ver_sw = db_tuple[12]
db_data.num_logged_errors = db_tuple[13]
db_data.num_logged_warnings = db_tuple[14]
db_data.flight_modes = \
{int(x) for x in db_tuple[15].split(',') if len(x) > 0}
db_data.ver_sw_release = db_tuple[16]
db_data.vehicle_uuid = db_tuple[17]
db_data.flight_mode_durations = \
[tuple(map(int, x.split(':'))) for x in db_tuple[18].split(',') if len(x) > 0]
db_data.start_time_utc = db_tuple[19]
# bring it into displayable form
ver_sw = db_data.ver_sw
if len(ver_sw) > 10:
ver_sw = ver_sw[:6]
if len(db_data.ver_sw_release) > 0:
try:
release_split = db_data.ver_sw_release.split()
release_type = int(release_split[1])
if release_type == 255: # it's a release
ver_sw = release_split[0]
except:
pass
airframe_data = get_airframe_data(db_data.sys_autostart_id)
if airframe_data is None:
airframe = db_data.sys_autostart_id
else:
airframe = airframe_data['name']
flight_modes = ', '.join([flight_modes_table[x][0]
for x in db_data.flight_modes if x in
flight_modes_table])
m, s = divmod(db_data.duration_s, 60)
h, m = divmod(m, 60)
duration_str = '{:d}:{:02d}:{:02d}'.format(h, m, s)
start_time_str = 'N/A'
if db_data.start_time_utc != 0:
try:
start_datetime = datetime.fromtimestamp(db_data.start_time_utc)
start_time_str = start_datetime.strftime("%Y-%m-%d %H:%M")
except ValueError as value_error:
# bogus date
print(value_error)
# make sure to break long descriptions w/o spaces (otherwise they
# mess up the layout)
description = html_long_word_force_break(db_data.description)
search_only_columns = []
if db_data.ver_sw is not None:
search_only_columns.append(db_data.ver_sw)
if db_data.ver_sw_release is not None:
search_only_columns.append(db_data.ver_sw_release)
if db_data.vehicle_uuid is not None:
search_only_columns.append(db_data.vehicle_uuid)
image_col = '<div class="no_map_overview"> Not rendered / No GPS </div>'
image_filename = os.path.join(get_overview_img_filepath(), log_id+'.png')
if os.path.exists(image_filename):
image_col = '<img class="map_overview" src="/overview_img/'
image_col += log_id+'.png" alt="Overview Image Load Failed" height=50/>'
return Columns([
counter,
'<a href="plot_app?log='+log_id+'">'+log_date+'</a>',
image_col,
description,
db_data.mav_type,
airframe,
db_data.sys_hw,
ver_sw,
duration_str,
start_time_str,
db_data.rating_str(),
db_data.num_logged_errors,
flight_modes
], search_only_columns)
# need to fetch all here, because we will do more SQL calls while
# iterating (having multiple cursor's does not seem to work)
db_tuples = cur.fetchall()
json_output['recordsTotal'] = len(db_tuples)
json_output['data'] = []
if data_length == -1:
data_length = len(db_tuples)
filtered_counter = 0
if search_str == '':
# speed-up the request by iterating only over the requested items
counter = data_start
for i in range(data_start, min(data_start + data_length, len(db_tuples))):
counter += 1
columns = get_columns_from_tuple(db_tuples[i], counter)
if columns is None:
continue
json_output['data'].append(columns.columns)
filtered_counter = len(db_tuples)
else:
counter = 1
for db_tuple in db_tuples:
counter += 1
columns = get_columns_from_tuple(db_tuple, counter)
if columns is None:
continue
if any(search_str in str(column).lower() for column in \
(columns.columns, columns.search_only_columns)):
if data_start <= filtered_counter < data_start + data_length:
json_output['data'].append(columns.columns)
filtered_counter += 1
cur.close()
con.close()
json_output['recordsFiltered'] = filtered_counter
self.set_header('Content-Type', 'application/json')
self.write(json.dumps(json_output))
class DBDataJoin(DBData, DBDataGenerated):
"""Class for joined Data"""
def add_generated_db_data_from_log(self, source):
"""Update joined data by parent data"""
self.__dict__.update(source.__dict__)
class BrowseHandler(tornado.web.RequestHandler):
""" Browse public log file Tornado request handler """
def get(self, *args, **kwargs):
""" GET request """
template = get_jinja_env().get_template(BROWSE_TEMPLATE)
template_args = {}
search_str = self.get_argument('search', '').lower()
if len(search_str) > 0:
template_args['initial_search'] = json.dumps(search_str)
self.write(template.render(template_args))
|
tos/lib/ppp/tests/Lcp/test-startup.py | mtaghiza/tinyos-main-1 | 1,053 | 11073649 | <filename>tos/lib/ppp/tests/Lcp/test-startup.py
import surf
import sys
import binascii
import struct
import ppp4py.hdlc
import ppp4py.protocol.base
surf = surf.Device()
framer = ppp4py.hdlc.HDLCforPPP()
pppd = ppp4py.PPP(framer=framer)
import pppprint
pppprint.PppPrintProtocol(pppd)
lcp = pppd._lcp
lcp._configureRequest.clear()
for opt in lcp.options():
if opt.isNegotiable():
lcp._configureRequest.appendOption(opt, opt.proposedLocalValue())
cr_frame = lcp.pack(lcp._configureRequest.pack())
#info = framer.framePacket(cr_frame)
#print binascii.hexlify(info)
#print pppd.decode(info, is_framed=False)
frame = framer.framePacket(cr_frame, compress_ac=False)
print 'TX: %s' % (binascii.hexlify(frame),)
surf.write(frame)
while True:
pkt = surf.getPacket(framer)
(protocol, information) = pppd.decodePacket(pkt, is_framed=False)
if isinstance(protocol, ppp4py.protocol.base.Protocol):
print protocol.decode(information)
else:
print 'Protocol %04x: %s' % (protocol, binascii.hexlify(information))
|
src/ui/LPC_uidef.py | ufo2011/NXP-MCUBootUtility | 174 | 11073652 | <filename>src/ui/LPC_uidef.py
import wx
import sys, os
kConnectStep_Normal = 1
kBootDevice_InternalNor = 'On-chip NOR' #'C040HD NOR'
kBootDevice_QuadspiNor = 'QUADSPI NOR'
kBootDevice_v3_0_0 = [kBootDevice_InternalNor]
kBootDevice_Latest = kBootDevice_v3_0_0
kSecureBootType_PlainUnsigned = 'Plain Unsigned Image Boot'
kSecureBootType_v3_0_0 = [kSecureBootType_PlainUnsigned]
kSecureBootType_Latest = kSecureBootType_v3_0_0
kMemBlockColor_Image = wx.BLUE
|
dash/_callback.py | emilhe/dash | 17,143 | 11073692 | import collections
from functools import wraps
from .dependencies import (
handle_callback_args,
handle_grouped_callback_args,
Output,
)
from .exceptions import PreventUpdate
from ._grouping import (
flatten_grouping,
make_grouping_by_index,
grouping_len,
)
from ._utils import (
create_callback_id,
stringify_id,
to_json,
)
from . import _validate
class NoUpdate:
# pylint: disable=too-few-public-methods
pass
GLOBAL_CALLBACK_LIST = []
GLOBAL_CALLBACK_MAP = {}
GLOBAL_INLINE_SCRIPTS = []
def callback(*_args, **_kwargs):
"""
Normally used as a decorator, `@dash.callback` provides a server-side
callback relating the values of one or more `Output` items to one or
more `Input` items which will trigger the callback when they change,
and optionally `State` items which provide additional information but
do not trigger the callback directly.
`@dash.callback` is an alternative to `@app.callback` (where `app = dash.Dash()`)
introduced in Dash 2.0.
It allows you to register callbacks without defining or importing the `app`
object. The call signature is identical and it can be used instead of `app.callback`
in all cases.
The last, optional argument `prevent_initial_call` causes the callback
not to fire when its outputs are first added to the page. Defaults to
`False` and unlike `app.callback` is not configurable at the app level.
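    A minimal usage sketch (illustrative; the component ids and properties below
    are assumptions, not taken from this module):
        from dash import Input, Output, callback
        @callback(
            Output("result", "children"),
            Input("my-input", "value"),
            prevent_initial_call=True,
        )
        def update_result(value):
            return f"You typed: {value}"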
"""
return register_callback(
GLOBAL_CALLBACK_LIST,
GLOBAL_CALLBACK_MAP,
False,
*_args,
**_kwargs,
)
def clientside_callback(clientside_function, *args, **kwargs):
return register_clientside_callback(
GLOBAL_CALLBACK_LIST,
GLOBAL_CALLBACK_MAP,
False,
GLOBAL_INLINE_SCRIPTS,
clientside_function,
*args,
**kwargs,
)
def insert_callback(
callback_list,
callback_map,
config_prevent_initial_callbacks,
output,
outputs_indices,
inputs,
state,
inputs_state_indices,
prevent_initial_call,
):
if prevent_initial_call is None:
prevent_initial_call = config_prevent_initial_callbacks
callback_id = create_callback_id(output)
callback_spec = {
"output": callback_id,
"inputs": [c.to_dict() for c in inputs],
"state": [c.to_dict() for c in state],
"clientside_function": None,
"prevent_initial_call": prevent_initial_call,
}
callback_map[callback_id] = {
"inputs": callback_spec["inputs"],
"state": callback_spec["state"],
"outputs_indices": outputs_indices,
"inputs_state_indices": inputs_state_indices,
}
callback_list.append(callback_spec)
return callback_id
def register_callback(
callback_list, callback_map, config_prevent_initial_callbacks, *_args, **_kwargs
):
(
output,
flat_inputs,
flat_state,
inputs_state_indices,
prevent_initial_call,
) = handle_grouped_callback_args(_args, _kwargs)
if isinstance(output, Output):
# Insert callback with scalar (non-multi) Output
insert_output = output
multi = False
else:
# Insert callback as multi Output
insert_output = flatten_grouping(output)
multi = True
output_indices = make_grouping_by_index(output, list(range(grouping_len(output))))
callback_id = insert_callback(
callback_list,
callback_map,
config_prevent_initial_callbacks,
insert_output,
output_indices,
flat_inputs,
flat_state,
inputs_state_indices,
prevent_initial_call,
)
# pylint: disable=too-many-locals
def wrap_func(func):
@wraps(func)
def add_context(*args, **kwargs):
output_spec = kwargs.pop("outputs_list")
_validate.validate_output_spec(insert_output, output_spec, Output)
func_args, func_kwargs = _validate.validate_and_group_input_args(
args, inputs_state_indices
)
# don't touch the comment on the next line - used by debugger
output_value = func(*func_args, **func_kwargs) # %% callback invoked %%
if isinstance(output_value, NoUpdate):
raise PreventUpdate
if not multi:
output_value, output_spec = [output_value], [output_spec]
flat_output_values = output_value
else:
if isinstance(output_value, (list, tuple)):
# For multi-output, allow top-level collection to be
# list or tuple
output_value = list(output_value)
# Flatten grouping and validate grouping structure
flat_output_values = flatten_grouping(output_value, output)
_validate.validate_multi_return(
output_spec, flat_output_values, callback_id
)
component_ids = collections.defaultdict(dict)
has_update = False
for val, spec in zip(flat_output_values, output_spec):
if isinstance(val, NoUpdate):
continue
for vali, speci in (
zip(val, spec) if isinstance(spec, list) else [[val, spec]]
):
if not isinstance(vali, NoUpdate):
has_update = True
id_str = stringify_id(speci["id"])
component_ids[id_str][speci["property"]] = vali
if not has_update:
raise PreventUpdate
response = {"response": component_ids, "multi": True}
try:
jsonResponse = to_json(response)
except TypeError:
_validate.fail_callback_output(output_value, output)
return jsonResponse
callback_map[callback_id]["callback"] = add_context
return add_context
return wrap_func
_inline_clientside_template = """
var clientside = window.dash_clientside = window.dash_clientside || {{}};
var ns = clientside["{namespace}"] = clientside["{namespace}"] || {{}};
ns["{function_name}"] = {clientside_function};
"""
def register_clientside_callback(
callback_list,
callback_map,
config_prevent_initial_callbacks,
inline_scripts,
clientside_function,
*args,
**kwargs
):
output, inputs, state, prevent_initial_call = handle_callback_args(args, kwargs)
insert_callback(
callback_list,
callback_map,
config_prevent_initial_callbacks,
output,
None,
inputs,
state,
None,
prevent_initial_call,
)
# If JS source is explicitly given, create a namespace and function
# name, then inject the code.
if isinstance(clientside_function, str):
out0 = output
if isinstance(output, (list, tuple)):
out0 = output[0]
namespace = "_dashprivate_{}".format(out0.component_id)
function_name = "{}".format(out0.component_property)
inline_scripts.append(
_inline_clientside_template.format(
namespace=namespace.replace('"', '\\"'),
function_name=function_name.replace('"', '\\"'),
clientside_function=clientside_function,
)
)
# Callback is stored in an external asset.
else:
namespace = clientside_function.namespace
function_name = clientside_function.function_name
callback_list[-1]["clientside_function"] = {
"namespace": namespace,
"function_name": function_name,
}
|
vega/trainer/trainer_ms.py | This-50m/vega | 724 | 11073708 | # -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Mindspore Trainer."""
import os
from mindspore import context
from mindspore.train import Model as MsModel
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
from mindspore.train.loss_scale_manager import FixedLossScaleManager
from mindspore import save_checkpoint
from vega.trainer.callbacks.ms_callbacks import EvalCallBack
import vega
from vega.trainer.trainer_base import TrainerBase
from vega.trainer.modules.optimizer import Optimizer
from vega.trainer.modules.lr_schedulers import LrScheduler
from vega.modules.loss import Loss
from vega.common import ClassFactory, ClassType
import logging
from vega.common.general import General
@ClassFactory.register(ClassType.TRAINER)
class TrainerMs(TrainerBase):
"""Trainer mindspore class."""
def build(self):
"""Build the trainer by assembling the necessary components."""
super().build()
no_decay_params = self.config.optimizer.params.get("no_decay_params", [])
if self.config.lr_scheduler.params:
self.lr_scheduler = LrScheduler()
dynamic_lr = self.lr_scheduler()(base_lr=self.config.optimizer.params["lr"],
global_step=self.config.epochs * len(self.train_loader),
total_epoch=self.config.epochs)
self.optimizer = Optimizer()(model=self.model, dynamic_lr=dynamic_lr, no_decay_params=no_decay_params)
else:
self.optimizer = Optimizer()(model=self.model, no_decay_params=no_decay_params)
logging.info(f"The optimizer is {self.optimizer}.")
if hasattr(self.model, 'add_loss'):
loss_cls = Loss()()
self.model.add_loss(loss_cls)
self.loss = self.model.overall_loss()
else:
self.loss = Loss()()
self.metric_name = self.config.metric.type
# Some trainer has different train batch size from valid batch
self.train_metrics = None
self.valid_metrics = self._init_metrics()
self.ms_metrics = self.valid_metrics() if isinstance(self.valid_metrics(), dict) else {
self.metric_name: self.valid_metrics()}
if self.use_amp:
loss_scale = FixedLossScaleManager(self.config.loss_scale, drop_overflow_update=False)
logging.info(f"Use auto mix precision, and loss scale is {self.config.loss_scale},"
f"loss_scale_manager is {loss_scale}.")
self.ms_model = MsModel(network=self.model,
loss_fn=self.loss,
optimizer=self.optimizer,
metrics=self.ms_metrics,
loss_scale_manager=loss_scale,
amp_level=self.config.opt_level,
keep_batchnorm_fp32=self.config.keep_batchnorm_fp32)
else:
self.ms_model = MsModel(network=self.model,
loss_fn=self.loss,
optimizer=self.optimizer,
metrics=self.ms_metrics)
if not self.config.with_train:
save_path = self.get_local_worker_path(self.step_name, self.worker_id)
ckpt_file_name = os.path.join(save_path, "model_" + str(self.worker_id) + ".ckpt")
save_checkpoint(self.model, ckpt_file_name)
logging.info("Save checkpoint file without training.")
def init_env(self):
"""Init mindspore trainer environment."""
super().init_env()
self._init_ms_context()
def _train_epoch(self):
config_ck = CheckpointConfig(save_checkpoint_steps=self.config.save_steps, keep_checkpoint_max=1)
        # save the network model and parameters for subsequent fine-tuning
save_path = self.get_local_worker_path(self.step_name, self.worker_id)
ckpoint_cb = ModelCheckpoint(config=config_ck, directory=save_path)
loss_cb = LossMonitor(per_print_times=1)
time_cb = TimeMonitor(data_size=self.train_loader.get_dataset_size())
callback_list = [ckpoint_cb, loss_cb, time_cb]
if self.config.eval_per_epoch and not self.config.mixup:
eval_cb = EvalCallBack(self.ms_model, self.valid_loader, self.dataset_sink_mode, self)
callback_list.append(eval_cb)
try:
self.ms_model.train(epoch=self.epochs,
train_dataset=self.train_loader,
callbacks=callback_list,
dataset_sink_mode=self.dataset_sink_mode)
except RuntimeError as e:
logging.warning(f"failed to train the model, skip it, message: {str(e)}")
def _valid_epoch(self):
if self.config.mixup and self.config.loss.type == 'CrossEntropyLoss':
from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
loss_fn = SoftmaxCrossEntropyWithLogits(sparse=True)
self.ms_model = MsModel(network=self.model,
loss_fn=loss_fn,
optimizer=self.optimizer,
metrics=self.ms_metrics)
self.callbacks.before_valid()
try:
eval_metrics = self.ms_model.eval(valid_dataset=self.valid_loader,
dataset_sink_mode=self.dataset_sink_mode)
self.valid_metrics.update(eval_metrics)
valid_logs = dict()
valid_logs['cur_valid_perfs'] = self.valid_metrics.results
self.callbacks.after_valid(valid_logs)
except RuntimeError as exc:
logging.warning("RuntimeError occurred when eval the model. Skip eval this model.")
logging.warning("The RuntimeError message is : {}.".format(exc))
def _init_ms_context(self):
mode = General.ms_execute_mode
logging.info(f"Run train/val in mode: {mode}.")
if vega.is_npu_device():
logging.info(f"minspore context, mode: {context.get_context('mode')}, "
f"target: {context.get_context('device_target')}, "
f"device_id: {context.get_context('device_id')}")
logging.info(f"DEVICE_ID: {os.environ['DEVICE_ID']}")
context.set_context(mode=mode, device_target="Ascend")
else:
context.set_context(mode=mode, device_target="CPU")
self.dataset_sink_mode = General.dataset_sink_mode
logging.info(f"Dataset_sink_mode:{self.dataset_sink_mode}.")
|
modules/webshells/wso.py | decidedlygray/ptf | 4,391 | 11073714 | <reponame>decidedlygray/ptf<gh_stars>1000+
#!/usr/bin/env python
#####################################
# Installation module for WSO webshell from a backup source.
#####################################
# AUTHOR OF MODULE NAME
AUTHOR="<NAME> (Josexv1)"
# DESCRIPTION OF THE MODULE
DESCRIPTION="This module will install/update WSO - PHP webshell by Hardlinux"
# INSTALL TYPE GIT, SVN, FILE DOWNLOAD
# OPTIONS = GIT, SVN, FILE
INSTALL_TYPE="GIT"
# LOCATION OF THE FILE OR GIT/SVN REPOSITORY
REPOSITORY_LOCATION="https://github.com/Josexv1/wso-webshell/"
# WHERE DO YOU WANT TO INSTALL IT
INSTALL_LOCATION="wso"
# DEPENDS FOR DEBIAN INSTALLS
DEBIAN="git"
# DEPENDS FOR FEDORA INSTALLS
FEDORA="git"
# COMMANDS TO RUN AFTER
AFTER_COMMANDS=""
# LAUNCHER
LAUNCHER=""
|
WebMirror/management/rss_parser_funcs/feed_parse_extractSubudai11.py | fake-name/ReadableWebProxy | 193 | 11073728 | def extractSubudai11(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
if 'Mai Kitsune Waifu Chapter' in item['title']:
return buildReleaseMessageWithType(item, 'My Fox Immortal Wife', vol, chp, frag=frag, postfix=postfix)
if 'My Beautiful Teacher Chapter' in item['title']:
return buildReleaseMessageWithType(item, 'My Beautiful Teacher', vol, chp, frag=frag, postfix=postfix)
if 'Awakening – 仿如昨日' in item['title']:
return buildReleaseMessageWithType(item, 'Awakening – 仿如昨日', vol, chp, frag=frag, postfix=postfix)
if 'Awakening' in item['title']:
return buildReleaseMessageWithType(item, 'Awakening – 仿如昨日', vol, chp, frag=frag, postfix=postfix)
return False
|
databuilder/databuilder/models/schema/schema.py | jdavidheiser/amundsen | 139 | 11073732 | <reponame>jdavidheiser/amundsen<filename>databuilder/databuilder/models/schema/schema.py
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import re
from typing import (
Any, Iterator, Union,
)
from amundsen_rds.models import RDSModel
from amundsen_rds.models.schema import (
Schema as RDSSchema, SchemaDescription as RDSSchemaDescription,
SchemaProgrammaticDescription as RDSSchemaProgrammaticDescription,
)
from databuilder.models.graph_node import GraphNode
from databuilder.models.graph_relationship import GraphRelationship
from databuilder.models.graph_serializable import GraphSerializable
from databuilder.models.schema.schema_constant import (
SCHEMA_KEY_PATTERN_REGEX, SCHEMA_NAME_ATTR, SCHEMA_NODE_LABEL,
)
from databuilder.models.table_metadata import DescriptionMetadata
from databuilder.models.table_serializable import TableSerializable
class SchemaModel(GraphSerializable, TableSerializable):
def __init__(self,
schema_key: str,
schema: str,
description: str=None,
description_source: str=None,
**kwargs: Any
) -> None:
self._schema_key = schema_key
self._schema = schema
self._cluster_key = self._get_cluster_key(schema_key)
self._description = DescriptionMetadata.create_description_metadata(text=description,
source=description_source) \
if description else None
self._node_iterator = self._create_node_iterator()
self._relation_iterator = self._create_relation_iterator()
self._record_iterator = self._create_record_iterator()
def create_next_node(self) -> Union[GraphNode, None]:
try:
return next(self._node_iterator)
except StopIteration:
return None
def _create_node_iterator(self) -> Iterator[GraphNode]:
node = GraphNode(
key=self._schema_key,
label=SCHEMA_NODE_LABEL,
attributes={
SCHEMA_NAME_ATTR: self._schema,
}
)
yield node
if self._description:
yield self._description.get_node(self._get_description_node_key())
def create_next_relation(self) -> Union[GraphRelationship, None]:
try:
return next(self._relation_iterator)
except StopIteration:
return None
def create_next_record(self) -> Union[RDSModel, None]:
try:
return next(self._record_iterator)
except StopIteration:
return None
def _create_record_iterator(self) -> Iterator[RDSModel]:
schema_record = RDSSchema(
rk=self._schema_key,
name=self._schema,
cluster_rk=self._cluster_key
)
yield schema_record
if self._description:
if self._description.label == DescriptionMetadata.DESCRIPTION_NODE_LABEL:
yield RDSSchemaDescription(
rk=self._get_description_node_key(),
description_source=self._description.source,
description=self._description.text,
schema_rk=self._schema_key
)
else:
yield RDSSchemaProgrammaticDescription(
rk=self._get_description_node_key(),
description_source=self._description.source,
description=self._description.text,
schema_rk=self._schema_key
)
def _get_description_node_key(self) -> str:
desc = self._description.get_description_id() if self._description is not None else ''
return f'{self._schema_key}/{desc}'
def _create_relation_iterator(self) -> Iterator[GraphRelationship]:
if self._description:
yield self._description.get_relation(start_node=SCHEMA_NODE_LABEL,
start_key=self._schema_key,
end_key=self._get_description_node_key())
def _get_cluster_key(self, schema_key: str) -> str:
schema_key_pattern = re.compile(SCHEMA_KEY_PATTERN_REGEX)
schema_key_match = schema_key_pattern.match(schema_key)
if not schema_key_match:
raise Exception(f'{schema_key} does not match the schema key pattern')
cluster_key = schema_key_match.group(1)
return cluster_key
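# A small construction sketch (illustrative addition): the schema key below follows the
# ``<database>://<cluster>.<schema>`` convention this model expects; the concrete values
# are assumptions, not fixtures from this repository.
if __name__ == '__main__':
    example_schema = SchemaModel(
        schema_key='hive://gold.core',
        schema='core',
        description='Core warehouse schema',
        description_source='description',
    )
    node = example_schema.create_next_node()
    while node is not None:
        print(node.label, node.key)
        node = example_schema.create_next_node()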
|
tests/unit/domain/test_helpers.py | expobrain/import-linter | 171 | 11073760 | <reponame>expobrain/import-linter<gh_stars>100-1000
import re
from typing import Dict, List, Union, cast
import pytest
from grimp.adaptors.graph import ImportGraph # type: ignore
from importlinter.domain.helpers import (
MissingImport,
add_imports,
import_expressions_to_imports,
pop_import_expressions,
pop_imports,
)
from importlinter.domain.imports import DirectImport, ImportExpression, Module
class TestPopImports:
IMPORTS = [
dict(
importer="mypackage.green",
imported="mypackage.yellow",
line_number=1,
line_contents="blah",
),
dict(
importer="mypackage.green",
imported="mypackage.blue",
line_number=2,
line_contents="blahblah",
),
dict(
importer="mypackage.blue",
imported="mypackage.green",
line_number=10,
line_contents="blahblahblah",
),
]
def test_succeeds(self):
graph = self._build_graph(imports=self.IMPORTS)
imports_to_pop = self.IMPORTS[0:2]
import_to_leave = self.IMPORTS[2]
result = pop_imports(
graph,
[
DirectImport(importer=Module(i["importer"]), imported=Module(i["imported"]))
for i in imports_to_pop
],
)
assert result == imports_to_pop
assert graph.direct_import_exists(
importer=import_to_leave["importer"], imported=import_to_leave["imported"]
)
assert graph.count_imports() == 1
def test_raises_missing_import_if_module_not_found(self):
graph = self._build_graph(imports=self.IMPORTS)
non_existent_import = DirectImport(
importer=Module("mypackage.nonexistent"),
imported=Module("mypackage.yellow"),
line_number=1,
line_contents="-",
)
with pytest.raises(
MissingImport,
match=re.escape(
"Ignored import mypackage.nonexistent -> mypackage.yellow "
"not present in the graph."
),
):
pop_imports(graph, [non_existent_import])
def test_works_with_multiple_external_imports_from_same_module(self):
imports_to_pop = [
dict(
importer="mypackage.green",
imported="someexternalpackage",
line_number=2,
line_contents="from someexternalpackage import one",
),
dict(
importer="mypackage.green",
imported="someexternalpackage",
line_number=2,
line_contents="from someexternalpackage import two",
),
]
imports = self.IMPORTS + imports_to_pop
graph = self._build_graph(imports=imports)
result = pop_imports(
graph,
[
DirectImport(
importer=Module(i["importer"]),
imported=Module(i["imported"]),
line_number=i["line_number"],
line_contents=i["line_contents"],
)
for i in imports_to_pop
],
)
assert result == imports_to_pop
one_of_the_popped_imports = imports_to_pop[0]
assert not graph.direct_import_exists(
importer=one_of_the_popped_imports["importer"],
imported=one_of_the_popped_imports["imported"],
)
assert graph.count_imports() == len(self.IMPORTS)
def _build_graph(self, imports):
graph = ImportGraph()
for import_ in imports:
graph.add_import(**import_)
return graph
class TestImportExpressionsToImports:
DIRECT_IMPORTS = [
DirectImport(
importer=Module("mypackage.green"),
imported=Module("mypackage.yellow"),
line_number=1,
line_contents="-",
),
DirectImport(
importer=Module("mypackage.green"),
imported=Module("mypackage.blue"),
line_number=1,
line_contents="-",
),
DirectImport(
importer=Module("mypackage.blue"),
imported=Module("mypackage.green"),
line_number=1,
line_contents="-",
),
DirectImport(
importer=Module("mypackage.blue.cats"),
imported=Module("mypackage.purple.dogs"),
line_number=1,
line_contents="-",
),
DirectImport(
importer=Module("mypackage.green.cats"),
imported=Module("mypackage.orange.dogs"),
line_number=1,
line_contents="-",
),
DirectImport(
importer=Module("mypackage.green.cats"),
imported=Module("mypackage.orange.mice"),
line_number=1,
line_contents="-",
),
# Direct imports of external packages can appear more than once, as the external package
# is squashed.
DirectImport(
importer=Module("mypackage.brown"),
imported=Module("someotherpackage"),
line_number=1,
line_contents="from someotherpackage import one",
),
DirectImport(
importer=Module("mypackage.brown"),
imported=Module("someotherpackage"),
line_number=2,
line_contents="from someotherpackage import two",
),
]
@pytest.mark.parametrize(
"description, expressions, expected",
[
(
"No wildcards",
[
ImportExpression(
importer=DIRECT_IMPORTS[0].importer.name,
imported=DIRECT_IMPORTS[0].imported.name,
),
],
[DIRECT_IMPORTS[0]],
),
(
"Importer wildcard",
[
ImportExpression(importer="mypackage.*", imported="mypackage.blue"),
],
[DIRECT_IMPORTS[1]],
),
(
"Imported wildcard",
[
ImportExpression(importer="mypackage.green", imported="mypackage.*"),
],
DIRECT_IMPORTS[0:2],
),
(
"Importer and imported wildcards",
[
ImportExpression(importer="mypackage.*", imported="mypackage.*"),
],
DIRECT_IMPORTS[0:3],
),
(
"Inner wildcard",
[
ImportExpression(importer="mypackage.*.cats", imported="mypackage.*.dogs"),
],
DIRECT_IMPORTS[3:5],
),
(
"Multiple expressions, non-overlapping",
[
ImportExpression(importer="mypackage.green", imported="mypackage.*"),
ImportExpression(
importer="mypackage.green.cats", imported="mypackage.orange.*"
),
],
DIRECT_IMPORTS[0:2] + DIRECT_IMPORTS[4:6],
),
(
"Multiple expressions, overlapping",
[
ImportExpression(importer="mypackage.*", imported="mypackage.blue"),
ImportExpression(importer="mypackage.green", imported="mypackage.blue"),
],
[DIRECT_IMPORTS[1]],
),
(
"Multiple imports of external package with same importer",
[
ImportExpression(importer="mypackage.brown", imported="someotherpackage"),
],
DIRECT_IMPORTS[6:8],
),
],
)
def test_succeeds(self, description, expressions, expected):
graph = self._build_graph(self.DIRECT_IMPORTS)
assert sorted(
import_expressions_to_imports(graph, expressions), key=_direct_import_sort_key
) == sorted(expected, key=_direct_import_sort_key)
def test_raises_missing_import(self):
graph = ImportGraph()
graph.add_module("mypackage")
graph.add_module("other")
graph.add_import(
importer="mypackage.b", imported="other.foo", line_number=1, line_contents="-"
)
expression = ImportExpression(importer="mypackage.a.*", imported="other.foo")
with pytest.raises(MissingImport):
import_expressions_to_imports(graph, [expression])
def _build_graph(self, direct_imports):
graph = ImportGraph()
for direct_import in direct_imports:
graph.add_import(
importer=direct_import.importer.name,
imported=direct_import.imported.name,
line_number=direct_import.line_number,
line_contents=direct_import.line_contents,
)
return graph
class TestPopImportExpressions:
DIRECT_IMPORTS = [
DirectImport(
importer=Module("mypackage.green"),
imported=Module("mypackage.yellow"),
line_number=1,
line_contents="-",
),
DirectImport(
importer=Module("mypackage.green"),
imported=Module("mypackage.blue"),
line_number=1,
line_contents="-",
),
DirectImport(
importer=Module("mypackage.blue"),
imported=Module("mypackage.green"),
line_number=1,
line_contents="-",
),
DirectImport(
importer=Module("mypackage.blue.cats"),
imported=Module("mypackage.purple.dogs"),
line_number=1,
line_contents="-",
),
DirectImport(
importer=Module("mypackage.green.cats"),
imported=Module("mypackage.orange.dogs"),
line_number=1,
line_contents="-",
),
]
def test_succeeds(self):
graph = self._build_graph(self.DIRECT_IMPORTS)
expressions = [
ImportExpression(importer="mypackage.green", imported="mypackage.*"),
# Expressions can overlap.
ImportExpression(importer="mypackage.green", imported="mypackage.blue"),
ImportExpression(importer="mypackage.blue.cats", imported="mypackage.purple.dogs"),
]
popped_imports: List[Dict[str, Union[str, int]]] = pop_import_expressions(
graph, expressions
)
# Cast to direct imports to make comparison easier.
popped_direct_imports: List[DirectImport] = sorted(
map(self._dict_to_direct_import, popped_imports), key=_direct_import_sort_key
)
expected = sorted(
[
self.DIRECT_IMPORTS[0],
self.DIRECT_IMPORTS[1],
self.DIRECT_IMPORTS[3],
],
key=_direct_import_sort_key,
)
assert popped_direct_imports == expected
assert graph.count_imports() == 2
def _build_graph(self, direct_imports):
graph = ImportGraph()
for direct_import in direct_imports:
graph.add_import(
importer=direct_import.importer.name,
imported=direct_import.imported.name,
line_number=direct_import.line_number,
line_contents=direct_import.line_contents,
)
return graph
def _dict_to_direct_import(self, import_details: Dict[str, Union[str, int]]) -> DirectImport:
return DirectImport(
importer=Module(cast(str, import_details["importer"])),
imported=Module(cast(str, import_details["imported"])),
line_number=cast(int, import_details["line_number"]),
line_contents=cast(str, import_details["line_contents"]),
)
def test_add_imports():
graph = ImportGraph()
import_details = [
{"importer": "a", "imported": "b", "line_number": 1, "line_contents": "lorem ipsum"},
{"importer": "c", "imported": "d", "line_number": 2, "line_contents": "lorem ipsum 2"},
]
assert not graph.modules
add_imports(graph, import_details)
assert graph.modules == {"a", "b", "c", "d"}
def _direct_import_sort_key(direct_import: DirectImport):
# Doesn't matter how we sort, just a way of sorting consistently for comparison.
return (
direct_import.importer.name,
direct_import.imported.name,
direct_import.line_number,
)
|
romp/exports/convert_fbx.py | Saafke/ROMP | 385 | 11073771 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: <EMAIL>
#
# Author: <NAME>, Max Planck Institute for Intelligent Systems, Perceiving Systems
#
# Create keyframed animated skinned SMPL mesh from .pkl pose description
#
# Generated mesh will be exported in FBX or glTF format
#
# Notes:
# + Male and female gender models only
# + Script can be run from command line or in Blender Editor (Text Editor>Run Script)
# + Command line: Install mathutils module in your bpy virtualenv with 'pip install mathutils==2.81.2'
import os
import sys
import bpy
import time
import platform
# import joblib
import argparse
import numpy as np
import addon_utils
from math import radians
from mathutils import Matrix, Vector, Quaternion, Euler
# Globals
# Add your UNIX paths here!
male_model_path = '/home/yusun/ROMP/model_data/SMPL_unity_v.1.0.0/smpl/Models/SMPL_m_unityDoubleBlends_lbs_10_scale5_207_v1.0.0.fbx'
female_model_path = '/home/yusun/ROMP/model_data/SMPL_unity_v.1.0.0/smpl/Models/SMPL_f_unityDoubleBlends_lbs_10_scale5_207_v1.0.0.fbx'
character_model_path = None
# Handle fall back if files don't exist, also keeping the unix version before attempting the windows version.
plt = platform.system()
if plt == "Windows":
# Add your Windows paths here!
male_model_path = "C:/temp/mocap/smpl/SMPL_m_unityDoubleBlends_lbs_10_scale5_207_v1.0.0.fbx"
female_model_path = "C:/temp/mocap/smpl/SMPL_f_unityDoubleBlends_lbs_10_scale5_207_v1.0.0.fbx"
'''
python romp/exports/convert_fbx.py --input=demo/juntiquan_results/juntiquan_frames_ts_results.npz --output=demo/videos/jtq.fbx --gender=male
'''
fps_source = 24
fps_target = 24
gender = 'male' #female
start_origin = 1
person_id = 0
args = []
bone_name_from_index = {
0 : 'Pelvis',
1 : 'L_Hip',
2 : 'R_Hip',
3 : 'Spine1',
4 : 'L_Knee',
5 : 'R_Knee',
6 : 'Spine2',
7 : 'L_Ankle',
8: 'R_Ankle',
9: 'Spine3',
10: 'L_Foot',
11: 'R_Foot',
12: 'Neck',
13: 'L_Collar',
14: 'R_Collar',
15: 'Head',
16: 'L_Shoulder',
17: 'R_Shoulder',
18: 'L_Elbow',
19: 'R_Elbow',
20: 'L_Wrist',
21: 'R_Wrist',
22: 'L_Hand',
23: 'R_Hand'
}
bone_name_from_index_character = {
0 : 'Hips',
1 : 'RightUpLeg',
2 : 'LeftUpLeg',
3 : 'Spine',
4 : 'RightLeg',
5 : 'LeftLeg',
6 : 'Spine1',
7 : 'RightFoot',
8: 'LeftFoot',
9: 'Spine2',
10: 'LeftToeBase',
11: 'RightToeBase',
12: 'Neck',
13: 'LeftHandIndex1',
14: 'RightHandIndex1',
15: 'Head',
16: 'LeftShoulder',
17: 'RightShoulder',
18: 'LeftArm',
19: 'RightArm',
20: 'LeftForeArm',
21: 'RightForeArm',
22: 'LeftHand',
23: 'RightHand'
}
# Helper functions
# Computes rotation matrix through Rodrigues formula as in cv2.Rodrigues
# Source: smpl/plugins/blender/corrective_bpy_sh.py
def Rodrigues(rotvec):
theta = np.linalg.norm(rotvec)
r = (rotvec/theta).reshape(3, 1) if theta > 0. else rotvec
cost = np.cos(theta)
mat = np.asarray([[0, -r[2], r[1]],
[r[2], 0, -r[0]],
[-r[1], r[0], 0]])
return(cost*np.eye(3) + (1-cost)*r.dot(r.T) + np.sin(theta)*mat)
# Setup scene
def setup_scene(model_path, fps_target):
scene = bpy.data.scenes['Scene']
###########################
# Engine independent setup
###########################
scene.render.fps = fps_target
# Remove default cube
if 'Cube' in bpy.data.objects:
bpy.data.objects['Cube'].select_set(True)
bpy.ops.object.delete()
# Import gender specific .fbx template file
bpy.ops.import_scene.fbx(filepath=model_path)
# Process single pose into keyframed bone orientations
def process_pose(current_frame, pose, trans, pelvis_position):
if pose.shape[0] == 72:
rod_rots = pose.reshape(24, 3)
else:
rod_rots = pose.reshape(26, 3)
mat_rots = [Rodrigues(rod_rot) for rod_rot in rod_rots]
# Set the location of the Pelvis bone to the translation parameter
armature = bpy.data.objects['Armature']
bones = armature.pose.bones
# Pelvis: X-Right, Y-Up, Z-Forward (Blender -Y)
# Set absolute pelvis location relative to Pelvis bone head
try:
bones[bone_name_from_index[0]].location = Vector((100*trans[1], 100*trans[2], 100*trans[0])) - pelvis_position
    except:
        # Handle missing / wrong gender bones. This will change the model's gender if a problem is found and continue on.
bonename = bone_name_from_index[0]
if "m_" in bonename:
for bone in bone_name_from_index:
bone_name_from_index[bone] = bone_name_from_index[bone].replace("m_", "f_")
bone_name_from_index[0] = bonename.replace("m_", "f_")
bones[bonename.replace("m_", "f_")].location = Vector((100*trans[1], 100*trans[2], 100*trans[0])) - pelvis_position
if "f_" in bonename:
            for bone in bone_name_from_index:
                bone_name_from_index[bone] = bone_name_from_index[bone].replace("f_", "m_")
bone_name_from_index[0] = bonename.replace("f_", "m_")
bones[bonename.replace("f_", "m_")].location = Vector((100*trans[1], 100*trans[2], 100*trans[0])) - pelvis_position
# bones['Root'].location = Vector(trans)
bones[bone_name_from_index[0]].keyframe_insert('location', frame=current_frame)
for index, mat_rot in enumerate(mat_rots, 0):
if index >= 24:
continue
bone = bones[bone_name_from_index[index]]
bone_rotation = Matrix(mat_rot).to_quaternion()
quat_x_90_cw = Quaternion((1.0, 0.0, 0.0), radians(-90))
quat_x_n135_cw = Quaternion((1.0, 0.0, 0.0), radians(-135))
quat_x_p45_cw = Quaternion((1.0, 0.0, 0.0), radians(45))
quat_y_90_cw = Quaternion((0.0, 1.0, 0.0), radians(-90))
quat_z_90_cw = Quaternion((0.0, 0.0, 1.0), radians(-90))
if index == 0:
# Rotate pelvis so that avatar stands upright and looks along negative Y avis
bone.rotation_quaternion = (quat_x_90_cw @ quat_z_90_cw) @ bone_rotation
else:
bone.rotation_quaternion = bone_rotation
bone.keyframe_insert('rotation_quaternion', frame=current_frame)
return
# Process all the poses from the pose file
def process_poses(
input_path,
gender,
fps_source,
fps_target,
start_origin,
person_id=0,
):
print('Processing: ' + input_path)
poses, trans = [], []
subject_ids = 0 #list(data.keys())
data = np.load(input_path, allow_pickle=True)['results'][()]
if '_ts_results' in os.path.basename(input_path):
subject_ids = 0
print('Exporting motion sequence of subject {}'.format(subject_ids))
data = data[subject_ids]
frame_nums = list(data.keys())
poses, trans = np.zeros((len(frame_nums), 72)), np.zeros((len(frame_nums), 3))
for inds, frame_id in enumerate(frame_nums):
poses[inds] = data[frame_id]['poses']
trans[inds] = data[frame_id]['trans']
else:
print('Exporting motion sequence of subject {}'.format(subject_ids))
frame_nums = list(data.keys())
poses, trans = np.zeros((len(frame_nums), 72)), np.zeros((len(frame_nums), 3))
for inds, frame_id in enumerate(frame_nums):
poses[inds] = data[frame_id][subject_ids]['poses']
trans[inds] = data[frame_id][subject_ids]['trans']
if gender == 'female':
model_path = female_model_path
for k,v in bone_name_from_index.items():
bone_name_from_index[k] = 'f_avg_' + v
elif gender == 'male':
model_path = male_model_path
for k,v in bone_name_from_index.items():
bone_name_from_index[k] = 'm_avg_' + v
elif gender == 'character':
model_path = character_model_path
for k,v in bone_name_from_index.items():
bone_name_from_index[k] = 'mixamorig1:' + v
else:
print('ERROR: Unsupported gender: ' + gender)
sys.exit(1)
# Limit target fps to source fps
if fps_target > fps_source:
fps_target = fps_source
print('Gender:',gender)
print('Number of source poses: ',poses.shape[0])
print('Source frames-per-second: ', fps_source)
print('Target frames-per-second: ', fps_target)
print('--------------------------------------------------')
setup_scene(model_path, fps_target)
scene = bpy.data.scenes['Scene']
sample_rate = int(fps_source/fps_target)
scene.frame_end = (int)(poses.shape[0]/sample_rate)
# Retrieve pelvis world position.
# Unit is [cm] due to Armature scaling.
# Need to make copy since reference will change when bone location is modified.
armaturee = bpy.data.armatures[0]
ob = bpy.data.objects['Armature']
armature = ob.data
bpy.ops.object.mode_set(mode='EDIT')
# get specific bone name 'Bone'
pelvis_bone = armature.edit_bones[bone_name_from_index[0]]
# pelvis_bone = armature.edit_bones['f_avg_Pelvis']
pelvis_position = Vector(pelvis_bone.head)
bpy.ops.object.mode_set(mode='OBJECT')
source_index = 0
frame = 1
offset = np.array([0.0, 0.0, 0.0])
while source_index < poses.shape[0]:
print('Adding pose: ' + str(source_index))
# Go to new frame
scene.frame_set(frame)
process_pose(frame, poses[source_index], (trans[source_index] - offset), pelvis_position)
source_index += sample_rate
frame += 1
return frame
def rotate_armature(use):
if use == True:
# Switch to Pose Mode
bpy.ops.object.posemode_toggle()
# Find the Armature & Bones
ob = bpy.data.objects['Armature']
armature = ob.data
bones = armature.bones
rootbone = bones[0]
# Find the Root bone
for bone in bones:
if "avg_root" in bone.name:
rootbone = bone
rootbone.select = True
        # Rotate the Root bone by 90 degrees on the Y axis. Set --rotate_y=False if the rotation is not needed.
bpy.ops.transform.rotate(value=1.5708, orient_axis='Y', orient_type='GLOBAL', orient_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1)), orient_matrix_type='GLOBAL', constraint_axis=(False, True, False), mirror=True, use_proportional_edit=False, proportional_edit_falloff='SMOOTH', proportional_size=1, use_proportional_connected=False, use_proportional_projected=False, release_confirm=True)
# Revert back to Object Mode
bpy.ops.object.posemode_toggle()
def export_animated_mesh(output_path):
# Create output directory if needed
output_dir = os.path.dirname(output_path)
if not os.path.isdir(output_dir):
os.makedirs(output_dir, exist_ok=True)
# Fix Rotation
rotate_armature(args.rotate_y)
# Select only skinned mesh and rig
bpy.ops.object.select_all(action='DESELECT')
bpy.data.objects['Armature'].select_set(True)
bpy.data.objects['Armature'].children[0].select_set(True)
if output_path.endswith('.glb'):
print('Exporting to glTF binary (.glb)')
# Currently exporting without shape/pose shapes for smaller file sizes
bpy.ops.export_scene.gltf(filepath=output_path, export_format='GLB', export_selected=True, export_morph=False)
elif output_path.endswith('.fbx'):
print('Exporting to FBX binary (.fbx)')
bpy.ops.export_scene.fbx(filepath=output_path, use_selection=True, add_leaf_bones=False)
else:
print('ERROR: Unsupported export format: ' + output_path)
sys.exit(1)
return
if __name__ == '__main__':
person_id = 0
try:
if bpy.app.background:
parser = argparse.ArgumentParser(description='Create keyframed animated skinned SMPL mesh from VIBE output')
parser.add_argument('--input', dest='input_path', type=str, default='demo/juntiquan_results/juntiquan_frames_ts_results.npz', #'../demo/videos/sample_video2_results.npz',
help='Input file or directory')
parser.add_argument('--output', dest='output_path', type=str, default='demo/videos/jtq.fbx', #'../demo/videos/sample_video2.fbx',
help='Output file or directory')
parser.add_argument('--fps_source', type=int, default=fps_source,
help='Source framerate')
parser.add_argument('--fps_target', type=int, default=fps_target,
help='Target framerate')
parser.add_argument('--gender', type=str, default=gender,
help='Always use specified gender')
parser.add_argument('--start_origin', type=int, default=start_origin,
help='Start animation centered above origin')
parser.add_argument('--person_id', type=int, default=0,
help='Detected person ID to use for fbx animation')
parser.add_argument('--rotate_y',type = bool,default = True,help = 'whether to rotate the root bone on the Y axis by -90 on export. Otherwise it may be rotated incorrectly')
args = parser.parse_args()
input_path = args.input_path
output_path = args.output_path
print('Input path: ' + input_path)
print('Output path: ' + output_path)
if not os.path.exists(input_path):
print('ERROR: Invalid input path')
sys.exit(1)
fps_source = args.fps_source
fps_target = args.fps_target
gender = args.gender
start_origin = args.start_origin
# end if bpy.app.background
startTime = time.perf_counter()
# Process data
cwd = os.getcwd()
# Turn relative input/output paths into absolute paths
if not input_path.startswith(os.path.sep):
input_path = os.path.join(cwd, input_path)
if not output_path.startswith(os.path.sep):
output_path = os.path.join(cwd, output_path)
if not (output_path.endswith('.fbx') or output_path.endswith('.glb')):
print('ERROR: Invalid output format (must be .fbx or .glb)')
sys.exit(1)
# Process pose file
poses_processed = process_poses(
input_path=input_path,
gender=gender,
fps_source=fps_source,
fps_target=fps_target,
start_origin=start_origin,
person_id=person_id
)
export_animated_mesh(output_path)
print('--------------------------------------------------')
print('Animation export finished.')
print('Poses processed: ', poses_processed)
print('Processing time : ', time.perf_counter() - startTime)
print('--------------------------------------------------')
except SystemExit as ex:
print("closing")
if ex.code is None:
exit_status = 0
else:
exit_status = ex.code
print('Exiting. Exit status: ' + str(exit_status))
# Only exit to OS when we are not running in Blender GUI
if bpy.app.background:
sys.exit(exit_status)
|
doubanfm/API/netease_api.py | fakegit/douban.fm | 783 | 11073775 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
NetEase Cloud Music API
Provides a 320k music URL for a given song title
reference: https://github.com/yanunon/NeteaseCloudMusic/wiki/%E7%BD%91%E6%98%93%E4%BA%91%E9%9F%B3%E4%B9%90API%E5%88%86%E6%9E%90
TODO: song matching may be inaccurate
"""
from __future__ import print_function
import requests
import json
import hashlib
import logging
logger = logging.getLogger('doubanfm') # get logger
class Netease(object):
def __init__(self):
pass
def search(self, song_title, limit=1):
"""
        Search for a song by its title
        : params : song_title: the song title
                   limit: number of search results to return
"""
url = "http://music.163.com/api/search/pc"
headers = {'Cookie': 'appver=1.5.2',
'Referer': 'http://music.163.com'}
payload = {'s': song_title,
'limit': limit,
'type': 1}
r = requests.post(url, params=payload, headers=headers)
data = json.loads(r.text)
if data['code'] == 200:
return data['result']['songs'][0]
else:
return None
def get_song_id(self, song_title):
"""
        Get the song id for a given song title
"""
song = self.search(song_title)
if song.get('hMusic', None):
return song['hMusic']['dfsId'], song['hMusic']['bitrate']
elif song.get('mMusic', None):
return song['mMusic']['dfsId'], song['mMusic']['bitrate']
elif song.get('lMusic', None):
return song['lMusic']['dfsId'], song['lMusic']['bitrate']
return None
def get_url_and_bitrate(self, song_title):
"""
        Search for the 320k URL for a given song title
"""
song_id, bitrate = self.get_song_id(song_title)
url = 'http://m1.music.126.net/'
if song_id:
url += self.encrypted_id(song_id) + '/' + str(song_id) + '.mp3'
bitrate = str(bitrate/1000)
return url, bitrate
else:
return None, None
def encrypted_id(self, id):
id = str(id)
byte1 = bytearray('3go8&$8*3*3h0k(2)2')
byte2 = bytearray(id)
byte1_len = len(byte1)
for i in xrange(len(byte2)):
byte2[i] = byte2[i]^byte1[i%byte1_len]
m = hashlib.md5()
m.update(byte2)
result = m.digest().encode('base64')[:-1]
result = result.replace('/', '_')
result = result.replace('+', '-')
return result
if __name__ == '__main__':
url, bitrate = Netease().get_url_and_bitrate("董小姐")
print(url)
print(bitrate)
|
scripts/sentiment/process_arguana_xml.py | de9uch1/stanza | 3,633 | 11073800 | <reponame>de9uch1/stanza<gh_stars>1000+
from collections import namedtuple
import glob
import os
import sys
import tempfile
import xml.etree.ElementTree as ET
Fragment = namedtuple('Fragment', ['begin', 'end', 'rating'])
"""
Extracts positive, neutral, and negative phrases from the ArguAna hotel review corpus
Run as follows:
python3 process_arguana_xml.py split/training arguana_train.txt
ArguAna can be downloaded here:
http://argumentation.bplaced.net/arguana/data
http://argumentation.bplaced.net/arguana-data/arguana-tripadvisor-annotated-v2.zip
"""
def get_phrases(filename):
tree = ET.parse(filename)
fragments = []
root = tree.getroot()
body = None
for child in root:
if child.tag == '{http:///uima/cas.ecore}Sofa':
body = child.attrib['sofaString']
elif child.tag == '{http:///de/aitools/ie/uima/type/arguana.ecore}Fact':
fragments.append(Fragment(begin=int(child.attrib['begin']),
end=int(child.attrib['end']),
rating="1"))
elif child.tag == '{http:///de/aitools/ie/uima/type/arguana.ecore}Opinion':
if child.attrib['polarity'] == 'negative':
rating = "0"
elif child.attrib['polarity'] == 'positive':
rating = "2"
else:
raise ValueError("Unexpected polarity found in {}".format(filename))
fragments.append(Fragment(begin=int(child.attrib['begin']),
end=int(child.attrib['end']),
rating=rating))
phrases = [fragment.rating + " " + body[fragment.begin:fragment.end] for fragment in fragments]
#phrases = [phrase.replace("\n", " ") for phrase in phrases]
return phrases
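# A sketch of the lines get_phrases() yields (the review text here is made up);
# the leading digit is the sentiment class assigned above: 0 = negative Opinion,
# 1 = Fact, 2 = positive Opinion:
#   2 The staff was friendly and the room was spotless
#   0 The air conditioning was broken for the entire stay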
def main():
directory = sys.argv[1]
out_filename = sys.argv[2]
phrases = []
for filename in glob.glob(directory + '/*/*xmi'):
phrases.extend(get_phrases(filename))
print("Found {} phrases".format(len(phrases)))
tmp_filename = tempfile.NamedTemporaryFile(delete=False).name
with open(tmp_filename, "w") as fout:
for phrase in phrases:
fout.write("%s\n" % (phrase))
os.system("java edu.stanford.nlp.process.PTBTokenizer -preserveLines %s > %s" % (tmp_filename, out_filename))
os.unlink(tmp_filename)
if __name__ == "__main__":
main()
|
test/wdt_port_block_test.py | davide125/wdt | 2,894 | 11073809 | #!/usr/bin/env python3
import re
import subprocess
import time
from threading import Thread
from common_utils import *
# Todo: refactor using more of common_utils
receiver_end_time = 0
receiver_status = 0
def wait_for_receiver_finish(receiver_process):
global receiver_end_time
global receiver_status
receiver_status = receiver_process.wait()
receiver_end_time = time.perf_counter()
def test(resumption):
global receiver_end_time
global receiver_status
environment_variable_name = 'WDT_TEST_IPV6_CLIENT'
if (
environment_variable_name in os.environ and
os.environ[environment_variable_name] == "0"
):
print("Test with ipv6 client is disabled in this system")
return
receiver_cmd = get_receiver_binary() + " -skip_writes -num_ports=1 -v 1"
print(receiver_cmd)
receiver_process = subprocess.Popen(
receiver_cmd.split(),
stdout=subprocess.PIPE,
universal_newlines=True
)
connection_url = receiver_process.stdout.readline().strip()
print(connection_url)
# wdt url can be of two kinds :
# 1. wdt://localhost?ports=1,2,3,4
# 2. wdt://localhost:1?num_ports=4
# the second kind of url is another way of expressing the first one
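    # A sketch of what the parsing below extracts (port numbers are hypothetical):
    #   "wdt://localhost?ports=22356,22357" -> port_to_block="22356", start_port=""
    #   "wdt://localhost:22356?num_ports=1" -> port_to_block="22356", start_port=":22356"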
    url_match = re.search(r'\?(.*&)?ports=([0-9]+).*', connection_url)
if not url_match:
        url_match = re.search(r':([0-9]+)(\?.*)', connection_url)
rest_of_url = url_match.group(2)
port_to_block = url_match.group(1)
start_port = ":" + port_to_block
else:
rest_of_url = url_match.group(0)
start_port = ""
port_to_block = url_match.group(2)
print(rest_of_url + " " + port_to_block)
# start a thread to wait for receiver finish
thread = Thread(target=wait_for_receiver_finish, args=[receiver_process])
thread.start()
# we hack the url to be ::1 instead of hostname to increase chances
# it works on machines which do have ipv6 but no dns entry
sender_cmd = (
"(sleep 20 | nc -4 localhost {0}) &> /dev/null & "
"(sleep 20 | nc -4 localhost {0}) &> /dev/null & "
"sleep 1; {3} -directory wdt/ -ipv6 "
"-num_ports=1 "
"-connection_url \"wdt://[::1]{1}{2}\""
).format(
port_to_block, start_port, rest_of_url, get_sender_binary()
)
if resumption:
sender_cmd = sender_cmd + " -enable_download_resumption"
print(sender_cmd)
status = os.system(sender_cmd)
status >>= 8
sender_end_time = time.perf_counter()
# wait for receiver finish
thread.join()
status |= receiver_status
if status != 0:
print("Test failed, exiting with {0}".format(status))
exit(status)
diff = abs(sender_end_time - receiver_end_time) * 1000
max_allowed_diff = 200
if diff > max_allowed_diff:
print(
(
"Sender and Receiver end time difference {0} is more than "
"allowed diff {1}"
).format(diff, max_allowed_diff)
)
exit(1)
print(
(
"Test passed - Sender and Receiver"
" end time diff {0} ms"
).format(diff)
)
print("Testing without download resumption")
test(False)
print("Testing with download resumption")
test(True)
|
docarray/array/mixins/evaluation.py | fastflair/docarray | 591 | 11073819 | <filename>docarray/array/mixins/evaluation.py
import warnings
from typing import Optional, Union, TYPE_CHECKING, Callable
import numpy as np
from ...score import NamedScore
if TYPE_CHECKING:
from ... import Document, DocumentArray
class EvaluationMixin:
"""A mixin that provides ranking evaluation functionality to DocumentArrayLike objects"""
def evaluate(
self,
other: 'DocumentArray',
metric: Union[str, Callable[..., float]],
hash_fn: Optional[Callable[['Document'], str]] = None,
metric_name: Optional[str] = None,
strict: bool = True,
**kwargs,
) -> Optional[float]:
"""Compute ranking evaluation metrics for a given `DocumentArray` when compared with a groundtruth.
        This implementation expects a groundtruth `DocumentArray` that is structurally identical to `self`. It is based
        on comparing the `matches` of the Documents inside the `DocumentArray`.
        This method fills the `evaluations` field of the Documents inside this `DocumentArray` and returns the average of the computed values.
        :param other: The groundtruth `DocumentArray` that the `DocumentArray` compares to.
:param metric: The name of the metric, or multiple metrics to be computed
:param hash_fn: The function used for identifying the uniqueness of Documents. If not given, then ``Document.id`` is used.
:param metric_name: If provided, the results of the metrics computation will be stored in the `evaluations` field of each Document. If not provided, the name will be computed based on the metrics name.
        :param strict: If set, then the left and right sides are required to be fully aligned: on the length, and on the semantics of the length. This prevents
            you from accidentally evaluating on irrelevant matches.
:param kwargs: Additional keyword arguments to be passed to `metric_fn`
:return: The average evaluation computed or a list of them if multiple metrics are required
"""
if strict:
self._check_length(len(other))
if hash_fn is None:
hash_fn = lambda d: d.id
if callable(metric):
metric_fn = metric
elif isinstance(metric, str):
from ...math import evaluation
metric_fn = getattr(evaluation, metric)
metric_name = metric_name or metric_fn.__name__
results = []
caller_max_rel = kwargs.pop('max_rel', None)
for d, gd in zip(self, other):
max_rel = caller_max_rel or len(gd.matches)
if strict and hash_fn(d) != hash_fn(gd):
raise ValueError(
f'Document {d} from the left-hand side and '
f'{gd} from the right-hand are not hashed to the same value. '
f'This means your left and right DocumentArray may not be aligned; or it means your '
f'`hash_fn` is badly designed.'
)
if not d.matches or not gd.matches:
raise ValueError(
f'Document {d!r} or {gd!r} has no matches, please check your Document'
)
targets = gd.matches[:max_rel]
desired = {hash_fn(m) for m in targets}
if len(desired) != len(targets):
warnings.warn(
f'{hash_fn!r} may not be valid, as it maps multiple Documents into the same hash. '
f'Evaluation results may be affected'
)
binary_relevance = [1 if hash_fn(m) in desired else 0 for m in d.matches]
r = metric_fn(binary_relevance, max_rel=max_rel, **kwargs)
d.evaluations[metric_name] = NamedScore(
value=r, op_name=str(metric_fn), ref_id=d.id
)
results.append(r)
if results:
return float(np.mean(results))
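    # A minimal usage sketch (hypothetical, aligned DocumentArrays `da` and
    # `groundtruth` whose Documents already carry `matches`); 'precision_at_k'
    # stands in for any metric name resolvable from docarray.math.evaluation,
    # and extra kwargs such as `k` are forwarded to that metric function:
    #
    #   mean_score = da.evaluate(groundtruth, metric='precision_at_k', k=5)
    #   print(da[0].evaluations['precision_at_k'].value)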
|
etl/parsers/etw/Microsoft_Windows_Security_Audit_Configuration_Client.py | IMULMUL/etl-parser | 104 | 11073831 | # -*- coding: utf-8 -*-
"""
Microsoft-Windows-Security-Audit-Configuration-Client
GUID : 08466062-aed4-4834-8b04-cddb414504e5
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("08466062-aed4-4834-8b04-cddb414504e5"), event_id=101, version=0)
class Microsoft_Windows_Security_Audit_Configuration_Client_101_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul
)
@declare(guid=guid("08466062-aed4-4834-8b04-cddb414504e5"), event_id=102, version=0)
class Microsoft_Windows_Security_Audit_Configuration_Client_102_0(Etw):
pattern = Struct(
"GPOList" / WString
)
@declare(guid=guid("08466062-aed4-4834-8b04-cddb414504e5"), event_id=104, version=0)
class Microsoft_Windows_Security_Audit_Configuration_Client_104_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul
)
@declare(guid=guid("08466062-aed4-4834-8b04-cddb414504e5"), event_id=105, version=0)
class Microsoft_Windows_Security_Audit_Configuration_Client_105_0(Etw):
pattern = Struct(
"GPOName" / WString,
"GPOID" / WString,
"SysvolPath" / WString
)
@declare(guid=guid("08466062-aed4-4834-8b04-cddb414504e5"), event_id=106, version=0)
class Microsoft_Windows_Security_Audit_Configuration_Client_106_0(Etw):
pattern = Struct(
"RemoteFile" / WString,
"LocalFile" / WString,
"GPOName" / WString
)
@declare(guid=guid("08466062-aed4-4834-8b04-cddb414504e5"), event_id=107, version=0)
class Microsoft_Windows_Security_Audit_Configuration_Client_107_0(Etw):
pattern = Struct(
"RemoteFile" / WString,
"LocalFile" / WString,
"GPOName" / WString,
"ErrorCode" / Int32ul
)
@declare(guid=guid("08466062-aed4-4834-8b04-cddb414504e5"), event_id=109, version=0)
class Microsoft_Windows_Security_Audit_Configuration_Client_109_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul
)
@declare(guid=guid("08466062-aed4-4834-8b04-cddb414504e5"), event_id=111, version=0)
class Microsoft_Windows_Security_Audit_Configuration_Client_111_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul
)
@declare(guid=guid("08466062-aed4-4834-8b04-cddb414504e5"), event_id=113, version=0)
class Microsoft_Windows_Security_Audit_Configuration_Client_113_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul
)
@declare(guid=guid("08466062-aed4-4834-8b04-cddb414504e5"), event_id=115, version=0)
class Microsoft_Windows_Security_Audit_Configuration_Client_115_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul
)
@declare(guid=guid("08466062-aed4-4834-8b04-cddb414504e5"), event_id=201, version=0)
class Microsoft_Windows_Security_Audit_Configuration_Client_201_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul
)
@declare(guid=guid("08466062-aed4-4834-8b04-cddb414504e5"), event_id=202, version=0)
class Microsoft_Windows_Security_Audit_Configuration_Client_202_0(Etw):
pattern = Struct(
"GPOList" / WString
)
@declare(guid=guid("08466062-aed4-4834-8b04-cddb414504e5"), event_id=204, version=0)
class Microsoft_Windows_Security_Audit_Configuration_Client_204_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul
)
@declare(guid=guid("08466062-aed4-4834-8b04-cddb414504e5"), event_id=205, version=0)
class Microsoft_Windows_Security_Audit_Configuration_Client_205_0(Etw):
pattern = Struct(
"GPOName" / WString,
"GPOID" / WString,
"SysvolPath" / WString
)
@declare(guid=guid("08466062-aed4-4834-8b04-cddb414504e5"), event_id=206, version=0)
class Microsoft_Windows_Security_Audit_Configuration_Client_206_0(Etw):
pattern = Struct(
"RemoteFile" / WString,
"LocalFile" / WString,
"GPOName" / WString
)
@declare(guid=guid("08466062-aed4-4834-8b04-cddb414504e5"), event_id=207, version=0)
class Microsoft_Windows_Security_Audit_Configuration_Client_207_0(Etw):
pattern = Struct(
"RemoteFile" / WString,
"LocalFile" / WString,
"GPOName" / WString,
"ErrorCode" / Int32ul
)
@declare(guid=guid("08466062-aed4-4834-8b04-cddb414504e5"), event_id=209, version=0)
class Microsoft_Windows_Security_Audit_Configuration_Client_209_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul
)
@declare(guid=guid("08466062-aed4-4834-8b04-cddb414504e5"), event_id=211, version=0)
class Microsoft_Windows_Security_Audit_Configuration_Client_211_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul
)
@declare(guid=guid("08466062-aed4-4834-8b04-cddb414504e5"), event_id=213, version=0)
class Microsoft_Windows_Security_Audit_Configuration_Client_213_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul
)
@declare(guid=guid("08466062-aed4-4834-8b04-cddb414504e5"), event_id=215, version=0)
class Microsoft_Windows_Security_Audit_Configuration_Client_215_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul
)
|
setup.py | rmallof/pysymemu | 271 | 11073860 | <gh_stars>100-1000
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "pysymemu",
version = "0.0.1-alpha",
author = "<NAME>",
author_email = "<EMAIL>",
description = ("A tool for symbolic execution of Intel 64 binaries."),
    requires = ['pyelftools', 'capstone'],
provides = ['pysymemu'],
license = "BSD",
url = 'http://github.com/pysymemu',
download_url= 'http://github.com/',
platforms = ['linux', 'win32', 'win64'],
keywords = "testing reverse enginering symbolic execution white box fuzzing automatic test case generation",
long_description=read('README.md'),
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 2.7",
"Topic :: Software Development :: Testing"
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Software Development :: Quality Assurance",
],
test_suite="test",
)
|
dev_test_cex_subscribe.py | hth945/unicorn-binance-websocket-api | 404 | 11073900 | <gh_stars>100-1000
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# File: dev_test_cex_subscribe.py
#
# Part of ‘UNICORN Binance WebSocket API’
# Project website: https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api
# Documentation: https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api
# PyPI: https://pypi.org/project/unicorn-binance-websocket-api/
#
# Author: <NAME>
# https://about.me/oliver-zehentleitner
#
# Copyright (c) 2019-2021, <NAME>
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager import BinanceWebSocketApiManager
import logging
import time
import threading
import os
# https://docs.python.org/3/library/logging.html#logging-levels
logging.basicConfig(level=logging.DEBUG,
filename=os.path.basename(__file__) + '.log',
format="{asctime} [{levelname:8}] {process} {thread} {module}: {message}",
style="{")
def print_stream_data_from_stream_buffer(binance_websocket_api_manager):
while True:
if binance_websocket_api_manager.is_manager_stopping():
exit(0)
oldest_stream_data_from_stream_buffer = binance_websocket_api_manager.pop_stream_data_from_stream_buffer()
if oldest_stream_data_from_stream_buffer is False:
time.sleep(0.01)
else:
#print(oldest_stream_data_from_stream_buffer)
pass
# create instance of BinanceWebSocketApiManager for Binance Chain DEX
binance_websocket_api_manager = BinanceWebSocketApiManager(exchange="binance.com")
# start a worker process to move the received stream_data from the stream_buffer to a print function
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_websocket_api_manager,))
worker_thread.start()
#markets = ['xrpusdt', 'rvnbtc']
markets = ['xrpusdt']
stream_id = binance_websocket_api_manager.create_stream(["kline_1m"], markets)
time.sleep(2)
binance_websocket_api_manager.get_stream_subscriptions(stream_id)
#binance_websocket_api_manager.subscribe_to_stream(stream_id,
# channels=['kline_1m', 'kline_5m', 'marketDepth',
# 'ticker', 'miniTicker', 'marketDiff'])
#binance_websocket_api_manager.subscribe_to_stream(stream_id, channels="arr", markets="!miniTicker")
#time.sleep(5)
#binance_websocket_api_manager.get_stream_subscriptions(stream_id)
markets = ['xrpbearbusd', 'zeceth', 'cndbtc', 'dashbtc', 'atompax', 'perlbtc', 'ardreth', 'zecbnb', 'bchabctusd',
'usdsbusdt', 'winbnb', 'xzcxrp', 'bchusdc', 'wavesbnb', 'kavausdt', 'btsusdt', 'chzbnb', 'tusdbnb',
'xtzbusd', 'bcptusdc', 'dogebnb', 'eosbearusdt', 'ambbnb', 'wrxbnb', 'poabtc', 'wanbtc', 'ardrbtc', 'icnbtc',
'tusdusdt', 'atombusd', 'nxseth', 'bnbusdt', 'trxxrp', 'erdpax', 'erdbtc', 'icxbusd', 'nulsbtc', 'hotusdt',
'wavespax', 'zilbnb', 'arnbtc', 'nulsusdt', 'wintrx', 'npxsbtc', 'busdtry', 'qtumbnb', 'eosbtc', 'xlmpax',
'tomobnb', 'eosbnb', 'engbtc', 'linketh', 'xrpbtc', 'fetbtc', 'stratusdt', 'navbnb', 'bcneth', 'yoyobtc',
'nanobnb', 'saltbtc', 'tfuelusdc', 'skybnb', 'fuelbtc', 'bnbusdc', 'inseth', 'btcpax', 'batbtc', 'rlceth',
'arketh', 'ltcpax', 'ltcbusd', 'duskbtc', 'mftusdt', 'bntusdt', 'mdabtc', 'enjbtc', 'poabnb', 'nanobusd',
'paxtusd', 'hotbtc', 'bcdbtc', 'beambnb', 'trxeth', 'omgbnb', 'cdtbtc', 'eosusdc', 'dashbusd', 'cocosbtc',
'dasheth', 'xrptusd', 'atomtusd', 'rcneth', 'rpxeth', 'xlmusdc', 'aionbusd', 'nxsbtc', 'chateth', 'repbtc',
'tctusdt', 'linkusdt', 'nasbtc', 'usdsusdc', 'xvgbtc', 'elfeth', 'ctxcbtc', 'cmteth', 'gnteth', 'usdspax',
'zilbtc', 'batpax', 'stratbtc', 'xzcbtc', 'iotausdt', 'etcbnb', 'ankrusdt', 'xlmeth', 'loombtc', 'erdusdc',
'rdnbnb', 'icneth', 'vetbtc', 'cvcusdt', 'ftmpax', 'ethbullusdt', 'edoeth', 'steemeth', 'gobnb', 'hsrbtc',
'ambbtc', 'bchabcbtc', 'dntbtc', 'btctusd', 'denteth', 'snglsbtc', 'eosbullusdt', 'xlmtusd', 'tnteth',
'sysbnb', 'renusdt', 'zrxusdt', 'xlmbtc', 'stormbtc', 'ncashbnb', 'omgusdt', 'troyusdt', 'venbtc', 'modbtc',
'dogepax', 'ontusdc', 'eurbusd', 'tctbnb', 'gxsbtc', 'celrbnb', 'adausdt', 'beambtc', 'elfbtc', 'celrbtc',
'rvnusdt', 'poaeth', 'wavesusdc', 'trxbnb', 'trxusdc', 'ethbearusdt', 'ethpax', 'bateth', 'kavabtc',
'paxbtc', 'trigbnb', 'btcusdc', 'oneusdc', 'xrptry', 'stxusdt', 'strateth', 'lendeth', 'neousdc',
'neobnb', 'cosbtc', 'powreth', 'rlcusdt', 'hbarbnb', 'wabieth', 'bqxeth', 'aionbtc', 'aeeth', 'mthbtc',
'wrxbtc', 'pptbtc', 'nknbtc', 'zecusdt', 'stormeth', 'qtumusdt']
time.sleep(2)
binance_websocket_api_manager.subscribe_to_stream(stream_id,
channels=['kline_1m', 'marketDepth',
'ticker', 'miniTicker', 'marketDiff'],
markets=markets)
time.sleep(10)
binance_websocket_api_manager.get_stream_subscriptions(stream_id)
results = binance_websocket_api_manager.get_results_from_endpoints()
print(str(results))
time.sleep(5)
for result in results:
print(str(result))
while True:
#binance_websocket_api_manager.print_summary()
binance_websocket_api_manager.print_stream_info(stream_id)
time.sleep(1)
|
Algorithms/Search_Algorithms/Binary_Search/Rotated-Sorted-Array-Search.py | arslantalib3/algo_ds_101 | 182 | 11073902 | <gh_stars>100-1000
def pivotedBinarySearch(arr, n, key):
pivot = findPivot(arr, 0, n-1);
# If we didn't find a pivot,
# then array is not rotated at all
if pivot == -1:
return binarySearch(arr, 0, n-1, key);
# If we found a pivot, then first
# compare with pivot and then
# search in two subarrays around pivot
if arr[pivot] == key:
return pivot
if arr[0] <= key:
return binarySearch(arr, 0, pivot-1, key);
return binarySearch(arr, pivot + 1, n-1, key);
# Function to get pivot. For array
# 3, 4, 5, 6, 1, 2 it returns 3
# (index of 6)
def findPivot(arr, low, high):
# base cases
if high < low:
return -1
if high == low:
return low
# low + (high - low)/2;
mid = int((low + high)/2)
if mid < high and arr[mid] > arr[mid + 1]:
return mid
if mid > low and arr[mid] < arr[mid - 1]:
return (mid-1)
if arr[low] >= arr[mid]:
return findPivot(arr, low, mid-1)
return findPivot(arr, mid + 1, high)
def binarySearch(arr, low, high, key):
if high < low:
return -1
# low + (high - low)/2;
mid = int((low + high)/2)
if key == arr[mid]:
return mid
if key > arr[mid]:
return binarySearch(arr, (mid + 1), high,
key);
return binarySearch(arr, low, (mid -1), key);
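# Worked example (made-up input): for arr = [4, 5, 6, 7, 0, 1, 2] and key = 1,
# findPivot returns 3 (the index of 7); since arr[0] = 4 > key, the search
# continues in the right half and binarySearch(arr, 4, 6, 1) returns index 5.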
arr1 = []
print("Please Enter number of elements in array")
n = int(input())
for i in range(n):
demo = int(input())
arr1.append(demo)
print("Please enter the element in array")
key = int(input())
if key not in arr1:
    print("Element not in array")
else:
    print("Index of the element is : ",
          pivotedBinarySearch(arr1, n, key))
|
deepsnap/graph.py | ruth-ann/deepsnap | 412 | 11073910 | import re
import types
import random
import copy
import math
import pdb
import numpy as np
import torch
from torch_geometric.utils import to_undirected
from typing import (
Dict,
List,
Union
)
import warnings
import deepsnap
class Graph(object):
r"""
A plain python object modeling a single graph with various
(optional) attributes.
Args:
G (Graph object, optional): The NetworkX or SnapX graph
object which contains features and labels. If it is not
specified, :class:`Graph` will use the tensor backend.
netlib (types.ModuleType, optional): The graph backend module.
Currently DeepSNAP supports the NetworkX and SnapX (for
SnapX only the undirected homogeneous graph) as the graph
backend. Default graph backend is the NetworkX.
**kwargs (optional): Keyworded argument list with keys such
as :obj:`node_feature`, :obj:`node_label` and
values that are corresponding attributes. The features
are required for the tensor backend.
"""
def __init__(self, G=None, netlib=None, **kwargs):
self.G = G
if netlib is not None:
deepsnap._netlib = netlib
keys = [
"node_feature",
"node_label",
"edge_feature",
"edge_label",
"graph_feature",
"graph_label",
"edge_index",
"edge_label_index",
"node_label_index",
"custom"
]
for key in keys:
self[key] = None
self._is_train = False
self._num_positive_examples = None
for key, item in kwargs.items():
self[key] = item
if G is None and kwargs:
if "directed" not in kwargs:
self.directed = True
if "edge_index" not in kwargs:
raise ValueError(
"A tensor of edge_index is required by using "
"the tensor backend."
)
# check for undirected edge_index format
if not self.directed:
edge_index_length = self.edge_index.shape[1]
edge_index_first_half, _ = (
torch.sort(self.edge_index[:, :int(edge_index_length / 2)])
)
edge_index_second_half, _ = (
torch.sort(self.edge_index[:, int(edge_index_length / 2):])
)
if not torch.equal(
edge_index_first_half,
torch.flip(edge_index_second_half, [0])
):
raise ValueError(
"In tensor backend mode with undirected graph, "
"the user provided edge_index should contain "
"undirected edges for both directions."
"the first half of edge_index should contain "
"unique edges in one direction and the second "
"half of edge_index should contain the same set "
"of unique edges of another direction."
)
if G is not None or kwargs:
# handle tensor backend + custom support
if (
("edge_label_index" not in kwargs)
and ("node_label_index" not in kwargs)
):
self._update_tensors(init=True)
else:
self._update_tensors(init=False)
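    # A construction sketch for the tensor backend (made-up sizes; no backend
    # graph object is passed, so node features and edge_index are required, and
    # an undirected graph must list every edge once per direction, first half
    # one way and second half reversed):
    #
    #   graph = Graph(
    #       node_feature=torch.rand(4, 16),
    #       edge_index=torch.tensor([[0, 1, 1, 2], [1, 2, 0, 1]]),
    #       directed=False,
    #   )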
@classmethod
def _from_dict(cls, dictionary: Dict[str, torch.tensor]):
r"""
Creates a data object from a python dictionary.
Args:
dictionary (dict): Python dictionary with key (string)
- value (torch.tensor) pair.
Returns:
:class:`deepsnap.graph.Graph`: return a new Graph object
with the data from the dictionary.
"""
if "G" in dictionary:
# If there is an G, initialize class in the graph backend
graph = cls(G=dictionary["G"])
else:
graph = cls(**dictionary)
for key, item in dictionary.items():
graph[key] = item
return graph
def __getitem__(self, key: str):
r"""
Gets the data of the attribute :obj:`key`.
"""
return getattr(self, key, None)
def __setitem__(self, key: str, value):
"""Sets the attribute :obj:`key` to :obj:`value`."""
setattr(self, key, value)
@property
def keys(self):
r"""
Returns all names of the graph attributes.
Returns:
list: List of attributes in the :class:`Graph` object.
"""
# filter attributes that are not observed by users
# (1) those with value "None"; (2) those start with '_'
keys = [
key
for key in self.__dict__.keys()
if self[key] is not None and key[0] != "_"
]
return keys
def __len__(self) -> int:
r"""
Returns the number of all present attributes.
Returns:
int: The number of all present attributes.
"""
return len(self.keys)
def __contains__(self, key: str) -> bool:
r"""
Returns :obj:`True`, if the attribute :obj:`key` is present in the
data.
"""
return key in self.keys
def __iter__(self):
r"""
Iterates over all present attributes in the data, yielding their
attribute names and content.
"""
for key in sorted(self.keys):
yield key, self[key]
def __call__(self, *keys):
r"""
Iterates over all attributes :obj:`*keys` in the data, yielding
their attribute names and content.
If :obj:`*keys` is not given this method will iterative over all
present attributes.
"""
for key in sorted(self.keys) if not keys else keys:
if key in self:
yield key, self[key]
def __cat_dim__(self, key: str, value) -> int:
r"""
Returns the dimension for which :obj:`value` of attribute
:obj:`key` will get concatenated when creating batches.
.. note::
This method is for internal use only, and should only be overridden
if the batch concatenation process is corrupted for a specific data
attribute.
"""
# `*index*` and `*face*` should be concatenated in the last dimension,
# everything else in the first dimension.
return -1 if "index" in key else 0
def __inc__(self, key: str, value) -> int:
r""""
Returns the incremental count to cumulatively increase the value
of the next attribute of :obj:`key` when creating batches.
.. note::
This method is for internal use only, and should only be overridden
if the batch concatenation process is corrupted for a specific data
attribute.
"""
# Only `*index*` and `*face*` should be cumulatively summed up when
# creating batches.
return self.num_nodes if "index" in key else 0
@property
def num_nodes(self) -> int:
r"""
Return number of nodes in the graph.
Returns:
int: Number of nodes in the graph.
"""
if self.G is not None:
return self.G.number_of_nodes()
return self[self._node_related_key].shape[0]
@property
def num_edges(self) -> int:
r"""
Returns the number of edges in the graph.
Returns:
int: Number of edges in the graph.
"""
if self.G is not None:
return self.G.number_of_edges()
if self.is_undirected():
return int(self.edge_index.shape[1] / 2)
return self.edge_index.shape[1]
@property
def num_node_features(self) -> int:
r"""
Returns node feature dimension in the graph.
Returns:
int: Node feature dimension. `0` if there is no
`node_feature`.
"""
return self.get_num_dims("node_feature", as_label=False)
@property
def num_node_labels(self) -> int:
r"""
Returns the number of the node labels in the graph.
Returns:
int: Number of node labels. `0` if there is no
`node_label`.
"""
return self.get_num_dims("node_label", as_label=True)
@property
def num_edge_features(self) -> int:
r"""
Returns edge feature dimension in the graph.
Returns:
int: Edge feature dimension. `0` if there is no
`edge_feature`.
"""
return self.get_num_dims("edge_feature", as_label=False)
@property
def num_edge_labels(self) -> int:
r"""
Returns the number of the edge labels in the graph.
Returns:
int: Number of edge labels. `0` if there is no
`edge_label`.
"""
return self.get_num_dims("edge_label", as_label=True)
@property
def num_graph_features(self) -> int:
r"""
Returns graph feature dimension in the graph.
Returns:
int: Graph feature dimension. `0` if there is no
`graph_feature`.
"""
return self.get_num_dims("graph_feature", as_label=False)
@property
def num_graph_labels(self) -> int:
r"""
Returns the number of the graph labels in the graph.
Returns:
int: Number of graph labels. `0` if there is no
`graph_label`.
"""
return self.get_num_dims("graph_label", as_label=True)
def get_num_labels(self, key: str):
r"""
Gets the lables for a specified key.
Args:
key (str): The chosen property.
Returns:
:class:`torch.Tensor`: Unique lables (in tensor format).
"""
return torch.unique(self[key])
def get_num_dims(self, key: str, as_label: bool = False) -> int:
r"""
Returns the number of dimensions for one graph/node/edge property.
Args:
key (str): The chosen property.
as_label (bool): If `as_label`, treat the tensor as labels.
Returns:
int: The number of dimensions for chosen property.
"""
if as_label:
# treat as label
if self[key] is not None:
if self[key].dtype == torch.long:
# classification label
return self.get_num_labels(key).shape[0]
else:
# regression label
if (
len(self[key].shape) == 1
and not Graph._is_graph_attribute(key)
):
# for node/edge tasks: 1 scalar per node/edge
return 1
else:
return self[key].shape[-1]
else:
return 0
else:
# treat as feature
if self[key] is not None:
return self[key].shape[1]
else:
return 0
def is_directed(self) -> bool:
r"""
Whether the graph is directed.
Returns:
bool: `True` if the graph is directed.
"""
if self.G is not None:
return self.G.is_directed()
return self.directed
def is_undirected(self) -> bool:
r"""
Whether the graph is undirected.
Returns:
bool: `True` if the graph is undirected.
"""
return not self.is_directed()
def apply_tensor(self, func, *keys):
r"""
Applies the function :obj:`func` to all tensor attributes specified by
:obj:`*keys`. If the :obj:`*keys` is not given, :obj:`func` is applied to
all present attributes.
Args:
func (callable): The function that will be applied
to a PyTorch tensor(s).
*keys (str, optional): Names of the tensor attributes that will
be applied.
Returns:
:class:`deepsnap.graph.Graph`: Return the
self :class:`deepsnap.graph.Graph`.
"""
for key, item in self(*keys):
if torch.is_tensor(item):
self[key] = func(item)
elif isinstance(self[key], dict):
for obj_key, obj_item in self[key].items():
if torch.is_tensor(obj_item):
self[key][obj_key] = func(obj_item)
return self
def contiguous(self, *keys):
r"""
Ensures a contiguous memory layout for the attributes specified by
:obj:`*keys`. If :obj:`*keys` is not given, all present attributes
are ensured to have a contiguous memory layout.
Args:
*keys (str, optional): Tensor attributes which will be in
contiguous memory layout.
Returns:
:class:`Graph`: :class:`Graph` object with specified tensor
attributes in contiguous memory layout.
"""
return self.apply_tensor(lambda x: x.contiguous(), *keys)
def to(self, device, *keys):
r"""
Transfers tensor to specified device for to all attributes that
are specified in the :obj:`*keys`.
If :obj:`*keys` is not given, the conversion is applied to all
present attributes.
Args:
device (str): Specified device name, such as `cpu` or
`cuda`.
*keys (str, optional): Tensor attributes that will be
transferred to specified device.
"""
return self.apply_tensor(lambda x: x.to(device), *keys)
def clone(self):
r"""
Deepcopy the graph object.
Returns:
:class:`Graph`:
A cloned :class:`Graph` object with deepcopying
all features.
"""
dictionary = {}
for k, v in self.__dict__.items():
if torch.is_tensor(v):
dictionary[k] = v.clone()
elif k == "netlib":
dictionary[k] = v
else:
if hasattr(v, "netlib"):
v.netlib = None
dictionary[k] = copy.deepcopy(v)
return self.__class__._from_dict(dictionary)
def _size_repr(self, value) -> List[int]:
r"""
Returns:
list: A list of size of each element in value
"""
if torch.is_tensor(value):
return list(value.size())
elif isinstance(value, int) or isinstance(value, float):
return [1]
elif isinstance(value, list) or isinstance(value, tuple):
return [len(value)]
else:
return []
def __repr__(self):
info = [f"{key}={self._size_repr(item)}" for key, item in self]
return f"{self.__class__.__name__}({', '.join(info)})"
@staticmethod
def _is_edge_attribute(key: str) -> bool:
r"""
Check whether an attribute is a edge attribute.
"""
# could be feature, label, etc.
return "edge" in key and "index" not in key
@staticmethod
def _is_node_attribute(key: str) -> bool:
r"""
Check whether an attribute is a node attribute.
"""
# could be feature, label, etc.
return "node" in key and "index" not in key
@staticmethod
def _is_graph_attribute(key: str) -> bool:
r"""
Check whether an attribute is a graph attribute.
"""
return "graph" in key and "index" not in key
def _is_valid(self):
r"""
Check validity.
"""
for key in self.keys:
if self._is_node_attribute(key):
if self.num_nodes != self[key].shape[0]:
raise ValueError(
f"key {key} is not valid, num nodes must equal "
"num nodes w/ features."
)
def _update_tensors(self, init: bool = False):
r"""
Update attributes and indices with values from the self.G
graph object.
"""
if self.G is not None:
self._update_attributes()
self._node_related_key = None
for key in self.keys:
if self._is_node_attribute(key):
self._node_related_key = key
break
if self._node_related_key is None:
warnings.warn("Node related key is required.")
self._update_index(init)
def _update_attributes(self):
r"""
        Update attributes with values from the self.G graph object.
"""
# node
if self.G.number_of_nodes() == 0:
raise ValueError(
"in _update_attributes, number of nodes in Graph "
"G must be larger than 0"
)
if self.G.number_of_edges() == 0:
raise ValueError(
"in _update_attributes, number of edges in Graph "
"G must be larger than 0"
)
# node
keys = next(iter(self.G.nodes(data=True)))[-1].keys()
for key in keys:
self[key] = self._get_node_attributes(key)
# edge
keys = next(iter(self.G.edges(data=True)))[-1].keys()
for key in keys:
self[key] = self._get_edge_attributes(key)
# graph
keys = self.G.graph.keys()
for key in keys:
self[key] = self._get_graph_attributes(key)
def _get_node_attributes(self, name: str) -> torch.tensor:
r"""
Returns the node attributes in the graph.
Multiple attributes will be stacked.
Args:
name(string): the name of the attributes to return.
Returns:
:class:`torch.tensor`: Node attributes.
"""
# new: concat
attributes = []
for _, d in self.G.nodes.items():
if name in d:
attributes.append(d[name])
if len(attributes) == 0:
return None
if torch.is_tensor(attributes[0]):
attributes = torch.stack(attributes, dim=0)
elif isinstance(attributes[0], float):
attributes = torch.tensor(attributes, dtype=torch.float)
elif isinstance(attributes[0], int):
attributes = torch.tensor(attributes, dtype=torch.long)
return attributes
def _get_edge_attributes(self, key: str) -> torch.tensor:
r"""
Returns the edge attributes in the graph.
Multiple attributes will be stacked.
Args:
key(string): the name of the attributes to return.
Returns:
:class:`torch.tensor`: Edge attributes.
"""
# new: concat
attributes = []
for x in self.G.edges(data=True):
if key in x[-1]:
attributes.append(x[-1][key])
if len(attributes) == 0:
return None
if torch.is_tensor(attributes[0]):
attributes = torch.stack(attributes, dim=0)
elif isinstance(attributes[0], float):
attributes = torch.tensor(attributes, dtype=torch.float)
elif isinstance(attributes[0], int):
attributes = torch.tensor(attributes, dtype=torch.long)
else:
raise TypeError(f"Unknown type {key} in edge attributes.")
if self.is_undirected():
attributes = torch.cat([attributes, attributes], dim=0)
return attributes
def _get_graph_attributes(self, key: str):
r"""
Returns the graph attributes.
Args:
key(string): the name of the attributes to return.
Returns:
any: graph attributes with the specified name.
"""
return self.G.graph.get(key)
def _update_nodes(
self,
nodes,
mapping: Dict[Union[str, int], int]
) -> List[tuple]:
r"""
Relabel nodes following mapping and add node dictionary for each
node if it is not already provided.
Returns:
list: A list of tuples representing nodes and node dictionaries.
"""
if isinstance(nodes[0], tuple):
# node dictionary is already provided
nodes = [
(mapping[node[0]], node[-1])
for node in nodes
]
else:
# node dictionary is not provided
nodes = [
(
mapping[node],
self.G.nodes[mapping[node]]
)
for node in nodes
]
return nodes
def _update_edges(self, edges, mapping, add_edge_info: bool = True):
r"""
Relabel edges following mapping and add edge dictionary for each
edge if it is not already provided.
Returns:
list: A list of tuples representing edges and edge dictionaries.
"""
for i in range(len(edges)):
node_0 = mapping[
edges[i][0]
]
node_1 = mapping[
edges[i][1]
]
if isinstance(edges[i][-1], dict):
# edge dictionary is already provided
edge_info = edges[i][-1]
if len(edges[i][:-1]) == 2:
edge = (node_0, node_1, edge_info)
elif len(edges[i][:-1]) == 3:
graph_index = edges[i][2]
edge = (node_0, node_1, graph_index, edge_info)
else:
raise ValueError("Each edge has more than 3 indices.")
else:
# edge dictionary is not provided
if len(edges[i]) == 2:
# not multigraph
if add_edge_info:
if self.G is not None:
edge = (
node_0, node_1, self.G.edges[node_0, node_1]
)
else:
feature_dict = {}
for key in self.keys:
if (
self._is_edge_attribute(key)
and torch.is_tensor(self[key])
):
feature_dict[key] = self[key][i]
edge = (node_0, node_1, feature_dict)
else:
edge = (node_0, node_1)
elif len(edges[i]) == 3:
# multigraph
graph_index = edges[i][2]
if add_edge_info:
if self.G is not None:
edge = (
node_0, node_1, graph_index,
self.G.edges[node_0, node_1, graph_index]
)
else:
feature_dict = {}
for key in self.keys:
if (
self._is_edge_attribute(key)
and torch.is_tensor(self[key])
):
feature_dict[key] = self[key][i]
edge = (node_0, node_1, feature_dict)
else:
edge = (node_0, node_1, graph_index)
else:
raise ValueError("Each edge has more than 3 indices.")
edges[i] = edge
return edges
def _custom_update(self, mapping: Dict[Union[int, str], int]):
r"""
Custom support by populating self.general_splits,
        self.disjoint_split, self.negative_edges, and self.task
"""
custom_keys = [
"general_splits", "disjoint_split", "negative_edges", "task"
]
if self.custom is not None:
for custom_key in custom_keys:
if custom_key in self.custom:
self[custom_key] = self.custom[custom_key]
elif not hasattr(self, custom_key):
self[custom_key] = None
if self.task is None:
raise ValueError(
"User must provide the task variable in dataset or graph "
"custom. optional values for task are node, edge and "
"link_pred."
)
if self.task not in ["node", "edge", "link_pred"]:
raise ValueError(
"self.task in graph.py must be either node, "
"edge or link_pred. the current self.task "
f"value is {self.task}."
)
if self.general_splits is not None:
if self.task == "node":
for i in range(len(self.general_splits)):
self.general_splits[i] = self._update_nodes(
self.general_splits[i],
mapping
)
elif self.task == "edge" or self.task == "link_pred":
for i in range(len(self.general_splits)):
self.general_splits[i] = self._update_edges(
self.general_splits[i],
mapping
)
if self.disjoint_split is not None:
if self.task == "link_pred":
self.disjoint_split = self._update_edges(
self.disjoint_split,
mapping
)
else:
raise ValueError(
"When self.disjoint_splits is not "
"None, self.task must be `link_pred`."
)
if self.negative_edges is not None:
if self.task == "link_pred":
for i in range(len(self.negative_edges)):
self.negative_edges[i] = self._update_edges(
self.negative_edges[i],
mapping,
add_edge_info=False
)
else:
raise ValueError(
"When self.negative_edges is not "
"None, self.task must be `link_pred`."
)
self._custom_update_flag = True
def _update_index(self, init: bool = False):
r"""
        Update attributes and indices with values from the self.G graph object.
"""
# relabel graphs
if self.G is not None:
keys = list(self.G.nodes)
vals = list(range(self.num_nodes))
mapping = dict(zip(keys, vals))
if keys != vals:
self.G = deepsnap._netlib.relabel_nodes(
self.G, mapping, copy=True
)
# get edges
self.edge_index = self._edge_to_index(list(self.G.edges))
else:
mapping = {x: x for x in range(self.num_nodes)}
if init:
# init is only true when creating the variables
# edge_label_index and node_label_index
self.edge_label_index = copy.deepcopy(self.edge_index)
self.node_label_index = (
torch.arange(self.num_nodes, dtype=torch.long)
)
self._custom_update(mapping)
def _node_to_index(self, nodes):
r"""
List of G.nodes to torch tensor node_index
Only the selected nodes' node indices are extracted.
Returns:
:class:`torch.tensor`: Node indices.
"""
nodes = [node[0] for node in nodes]
node_index = torch.tensor(nodes)
return node_index
def _edge_to_index(self, edges):
r"""
List of G.edges to torch tensor edge_index
Only the selected edges' edge indices are extracted.
Returns:
:class:`torch.tensor`: Edge indices.
"""
edges = [(edge[0], edge[1]) for edge in edges]
edge_index = torch.tensor(edges)
if self.is_undirected():
edge_index = torch.cat(
[edge_index, torch.flip(edge_index, [1])],
dim=0
)
return edge_index.permute(1, 0)
def _get_edge_attributes_by_key(self, edges, key: str):
r"""
List of G.edges to torch tensor for key,
with dimension [num_edges x key_dim].
Only the selected edges' attributes are extracted.
Returns:
:class:`torch.tensor`: Edge attributes.
"""
if len(edges) == 0:
raise ValueError(
"in _get_edge_attributes_by_key, "
"len(edges) must be larger than 0."
)
if not isinstance(edges[0][-1], dict) or key not in edges[0][-1]:
return None
attributes = [edge[-1][key] for edge in edges]
if torch.is_tensor(attributes[0]):
attributes = torch.stack(attributes, dim=0)
elif isinstance(attributes[0], float):
attributes = torch.tensor(attributes, dtype=torch.float)
elif isinstance(attributes[0], int):
attributes = torch.tensor(attributes, dtype=torch.long)
else:
raise ValueError("Unknown type of key {} in edge attributes.")
if self.is_undirected():
attributes = torch.cat([attributes, attributes], dim=0)
return attributes
def _get_edge_attributes_by_key_tensor(self, edge_index, key: str):
r"""
Extract the edge attributes indicated by edge_index in tensor backend.
Returns:
:class:`torch.tensor`: Edge attributes.
"""
if not torch.is_tensor(edge_index):
raise TypeError(
"edge_index is not in the correct format."
)
if key == "edge_index":
raise ValueError(
"edge_index cannot be selected."
)
if key not in self.keys or not torch.is_tensor(self[key]):
return None
attributes = torch.index_select(self[key], 0, edge_index)
if self.is_undirected():
attributes = torch.cat([attributes, attributes], dim=0)
return attributes
def _update_graphs(self, verbose: bool = False):
r"""
Update the .G graph object with new attributes.
The edges remain unchanged
(edge_index should not be directly modified).
The counter-part of update_tensors.
"""
for key in self.keys:
if Graph._is_node_attribute(key):
Graph.add_node_attr(self.G, key, self[key])
elif Graph._is_edge_attribute(key):
# the order of edge attributes is consistent with edge index
Graph.add_edge_attr(self.G, key, self[key])
elif Graph._is_graph_attribute(key):
Graph.add_graph_attr(self.G, key, self[key])
else:
if verbose:
print(f"Index fields: {key} ignored.")
def apply_transform(
self,
transform,
update_tensor: bool = True,
update_graph: bool = False,
deep_copy: bool = False,
**kwargs
):
r"""
Applies the transformation function to current graph object.
.. note::
When the backend graph object (e.g. networkx object) is
changed in the transform function, the argument :obj:`update_tensor`
is recommended, which will update the tensor representation to be in
sync with the transformed graph. Similarly, :obj:`update_graph` is
            recommended when the transform function makes changes to the tensor
objects.
However, the transformation function should not make changes to both
of the backend graph object and the tensors simultaneously. Otherwise
there might exist inconsistency between the transformed graph and
tensors. Also note that :obj:`update_tensor` and :obj:`update_graph`
cannot be `True` at the same time.
It is also possible to set both :obj:`update_tensor` and
:obj:`update_graph` to be `False`. This usually happens when one
            needs to transform the tensor representation but does not require
            the internal graph object to be in sync, for better efficiency.
In this case, the user should note that the internal `G` object is stale,
and that applying a transform in the future with
:obj:`update_tensor=True` will overwrite the current
            transformation (which was applied with parameters
            :obj:`update_tensor=False; update_graph=False`).
Args:
transform (callable): In the format
of :obj:`transform(deepsnap.graph.Graph, **kwargs)`.
The function might need to return :class:`deepsnap.graph.Graph`
(the transformed graph object).
update_tensor (bool): If the graph has changed, use the
graph to update the stored tensor attributes.
update_graph: (bool): If the tensor attributes have changed,
use the attributes to update the graph.
deep_copy (bool): If `True`, the graph will be deepcopied
and then fed into the :meth:`transform` function.
In this case, the :meth:`transform` function might also
need to return a :class:`Graph` object.
.. note::
When returning :obj:`Graph` object in the transform function,
user should decide whether the tensor values of the graph is
to be copied (deepcopy).
**kwargs (optional): Parameters used in the :meth:`transform`
function.
Returns:
:class:`Graph`: The transformed :class:`Graph` object.
Note:
This function is different from the function :meth:`apply_tensor`.
"""
if update_tensor and update_graph:
raise ValueError(
"Tensor and graph should not be specified together."
)
graph_obj = copy.deepcopy(self) if deep_copy else self
return_graph = transform(graph_obj, **kwargs)
if isinstance(return_graph, self.__class__):
return_graph = return_graph
elif return_graph is None:
# no return value; assumes in-place transform of the graph object
return_graph = graph_obj
else:
raise TypeError(
"Transform function returns a value of unknown type "
f"({return_graph.__class__})"
)
if update_graph:
if self.G is None:
raise ValueError("There is no G in the class.")
return_graph._update_graphs()
if update_tensor:
return_graph._update_tensors()
return return_graph
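    # A usage sketch (the transform below is hypothetical): it only modifies the
    # tensor attributes, so update_tensor is disabled to avoid overwriting the
    # result from the (stale) backend graph; pass update_graph=True instead if
    # the backend G object should be re-synced from the modified tensors:
    #
    #   def normalize_feature(graph):
    #       graph.node_feature = graph.node_feature / (
    #           graph.node_feature.norm(dim=1, keepdim=True) + 1e-12
    #       )
    #
    #   graph = graph.apply_transform(
    #       normalize_feature, update_tensor=False, update_graph=False
    #   )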
def apply_transform_multi(
self,
transform,
update_tensors: bool = True,
update_graphs: bool = False,
deep_copy: bool = False,
**kwargs
):
r"""
Applies transform function to the current graph object.
But Unlike the :meth:`apply_transform`, the transform
argument in this method can return a tuple of graphs
(:class:`Graph`).
Args:
transform (callable): In the format of
:obj:`transform(deepsnap.graph.Graph, **kwargs)`.
The function might need to return a tuple of graphs
that each has the type :class:`deepsnap.graph.Graph`
(the transformed graph objects).
update_tensors (bool): If the graphs have changed, use the
graph to update the stored tensor attributes.
update_graphs: (bool): If the tensor attributes have changed,
use the attributes to update the graphs.
deep_copy (bool): If `True`, the graph will be deepcopied
and then fed into the :meth:`transform` function.
In this case, the :meth:`transform` function might also
need to return a `Graph` object.
**kwargs (optional): Parameters used in the :meth:`transform`
function.
Returns:
tuple: A tuple of transformed :class:`Graph` objects.
"""
if update_tensors and update_graphs:
raise ValueError(
"Tensor and graph should not be specified together."
)
graph_obj = copy.deepcopy(self) if deep_copy else self
return_graphs = transform(graph_obj, **kwargs)
if isinstance(return_graphs[0], self.__class__):
return_graphs = return_graphs
elif return_graphs is None or len(return_graphs) == 0:
# no return value; assumes in-place transform of the graph object
return_graphs = (graph_obj,)
else:
raise TypeError(
"Transform function returns a value of unknown type "
f"({return_graphs[0].__class__})."
)
if update_graphs:
for return_graph in return_graphs:
if self.G is None:
raise ValueError("There is no G in the class.")
return_graph._update_graphs()
if update_tensors:
for return_graph in return_graphs:
return_graph._update_tensors()
return return_graphs
def _custom_split_node(self):
r"""
custom support version of _split_node
"""
split_num = len(self.general_splits)
split_graph = []
for i in range(split_num):
graph = copy.copy(self)
graph.node_label_index = self._node_to_index(
self.general_splits[i]
)
node_labels = []
for node in self.general_splits[i]:
node_label = node[-1]["node_label"]
node_labels.append(node_label)
node_labels = torch.tensor(node_labels)
graph.node_label = node_labels
split_graph.append(graph)
return split_graph
def _custom_split_edge(self):
r"""
custom support version of _split_edge
"""
split_num = len(self.general_splits)
split_graph = []
for i in range(split_num):
graph = copy.copy(self)
graph.edge_label_index = self._edge_to_index(
graph.general_splits[i]
)
edge_labels = []
for edge in graph.general_splits[i]:
edge_label = edge[-1]["edge_label"]
edge_labels.append(edge_label)
edge_labels = torch.tensor(edge_labels)
if self.is_undirected():
edge_labels = torch.cat(
[edge_labels, edge_labels]
)
graph.edge_label = edge_labels
split_graph.append(graph)
return split_graph
def _custom_split(
self,
task: str
):
r"""
custom support version of split
"""
if task == "node":
return self._custom_split_node()
elif task == "edge":
return self._custom_split_edge()
elif task == "link_pred":
return self._custom_split_link_pred()
elif task == "graph":
raise ValueError("Graph task does not split individual graphs.")
else:
raise ValueError("Unknown task.")
def split(
self,
task: str = "node",
split_ratio: List[float] = None,
shuffle: bool = True
):
r"""
Split current graph object to a list of graph objects.
Args:
task (str): One of `node`, `edge` or `link_pred`.
split_ratio (list): A list of ratios such as
`[train_ratio, validation_ratio, test_ratio]`. Default
is `[0.8, 0.1, 0.1]`.
shuffle (bool): Whether to shuffle data for the splitting.
Returns:
list: A list of :class:`Graph` objects.
"""
if split_ratio is None:
split_ratio = [0.8, 0.1, 0.1]
if not isinstance(split_ratio, list):
raise TypeError("split ratio must be a list.")
if len(split_ratio) > 3:
raise ValueError("split ratio must contain leq three values")
if not math.isclose(sum(split_ratio), 1.0):
raise ValueError("split ratio must sum up to 1.")
if not all(
isinstance(split_ratio_i, float) for split_ratio_i in split_ratio
):
raise TypeError("split ratio must contain all floats")
if not all(split_ratio_i > 0 for split_ratio_i in split_ratio):
raise ValueError("split ratio must contain all positivevalues.")
if task == "node":
return self._split_node(split_ratio, shuffle=shuffle)
elif task == "edge":
return self._split_edge(split_ratio, shuffle=shuffle)
elif task == "link_pred":
return self.split_link_pred(split_ratio, shuffle=shuffle)
elif task == "graph":
raise ValueError("Graph task does not split individual graphs.")
else:
raise ValueError("Unknown task.")
def _split_node(self, split_ratio: float, shuffle: bool = True):
r"""
        Split the graph into len(split_ratio) graphs for node prediction.
        Internally this splits node indices, and the model will only compute
        loss for the embedding of nodes in each split graph.
        In node classification, the whole graph is observed in train/val/test;
        only node_label_index is split.
"""
if self.num_nodes < len(split_ratio):
raise ValueError(
"In _split_node num of nodes are smaller than"
"number of splitted parts."
)
split_graphs = []
if shuffle:
shuffled_node_indices = torch.randperm(self.num_nodes)
else:
shuffled_node_indices = torch.arange(self.num_nodes)
        # used to indicate whether the default splitting results in
        # empty split graphs
split_empty_flag = False
nodes_split_list = []
# perform `default split`
split_offset = 0
for i, split_ratio_i in enumerate(split_ratio):
if i != len(split_ratio) - 1:
num_split_i = int(split_ratio_i * self.num_nodes)
nodes_split_i = shuffled_node_indices[
split_offset:split_offset + num_split_i
]
split_offset += num_split_i
else:
nodes_split_i = shuffled_node_indices[split_offset:]
if nodes_split_i.numel() == 0:
split_empty_flag = True
split_offset = 0
nodes_split_list = []
break
nodes_split_list.append(nodes_split_i)
if split_empty_flag:
            # perform `secure split` to guarantee that every split subgraph
            # contains at least one node.
for i, split_ratio_i in enumerate(split_ratio):
if i != len(split_ratio) - 1:
num_split_i = 1 + int(
split_ratio_i * (self.num_nodes - len(split_ratio))
)
nodes_split_i = shuffled_node_indices[
split_offset:split_offset + num_split_i
]
split_offset += num_split_i
else:
nodes_split_i = shuffled_node_indices[split_offset:]
nodes_split_list.append(nodes_split_i)
for nodes_split_i in nodes_split_list:
# shallow copy all attributes
graph_new = copy.copy(self)
graph_new.node_label_index = nodes_split_i
graph_new.node_label = self.node_label[nodes_split_i]
split_graphs.append(graph_new)
return split_graphs
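    # A worked example of the `secure split` fallback above (illustrative numbers):
    # with num_nodes = 5 and split_ratio = [0.8, 0.1, 0.1], the default split
    # leaves the second part empty, so the secure split assigns
    # 1 + int(0.8 * (5 - 3)) = 2 and 1 + int(0.1 * (5 - 3)) = 1 nodes to the first
    # two parts and the remaining 2 nodes to the last part; every part is non-empty.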
def _split_edge(self, split_ratio: float, shuffle: bool = True):
r"""
        Split the graph into len(split_ratio) graphs for edge prediction.
        Internally this splits edge indices, and the model will only compute
        loss for the edges in each split graph.
In edge classification, the whole graph is observed in train/val/test.
Only split over edge_label_index.
"""
if self.num_edges < len(split_ratio):
raise ValueError(
"In _split_node num of edges are smaller than"
"number of splitted parts."
)
split_graphs = []
if shuffle:
shuffled_edge_indices = torch.randperm(self.num_edges)
else:
shuffled_edge_indices = torch.arange(self.num_edges)
split_offset = 0
        # used to indicate whether the default splitting results in
        # empty split graphs
split_empty_flag = False
edges_split_list = []
for i, split_ratio_i in enumerate(split_ratio):
if i != len(split_ratio) - 1:
num_split_i = int(split_ratio_i * self.num_edges)
edges_split_i = shuffled_edge_indices[
split_offset:split_offset + num_split_i
]
split_offset += num_split_i
else:
edges_split_i = shuffled_edge_indices[split_offset:]
if edges_split_i.numel() == 0:
split_empty_flag = True
split_offset = 0
edges_split_list = []
break
edges_split_list.append(edges_split_i)
if split_empty_flag:
            # perform `secure split` to guarantee that every split subgraph
            # contains at least one edge.
for i, split_ratio_i in enumerate(split_ratio):
if i != len(split_ratio) - 1:
num_split_i = 1 + int(
split_ratio_i * (self.num_edges - len(split_ratio))
)
edges_split_i = shuffled_edge_indices[
split_offset:split_offset + num_split_i
]
split_offset += num_split_i
else:
edges_split_i = shuffled_edge_indices[split_offset:]
edges_split_list.append(edges_split_i)
for edges_split_i in edges_split_list:
# shallow copy all attributes
graph_new = copy.copy(self)
graph_new.edge_label_index = self.edge_index[:, edges_split_i]
graph_new.edge_label = self.edge_label[edges_split_i]
split_graphs.append(graph_new)
return split_graphs
def _custom_split_link_pred_disjoint(self):
r"""
custom support version of disjoint split_link_pred
"""
objective_edges = self.disjoint_split
objective_edges_no_info = [edge[:-1] for edge in objective_edges]
message_edges_no_info = (
list(set(self.G.edges) - set(objective_edges_no_info))
)
if len(message_edges_no_info[0]) == 3:
message_edges = [
(
edge[0], edge[1], edge[2],
self.G.edges[edge[0], edge[1], edge[2]]
)
for edge in message_edges_no_info
]
elif len(message_edges_no_info[0]) == 2:
message_edges = [
(edge[0], edge[1], self.G.edges[edge[0], edge[1]])
for edge in message_edges_no_info
]
else:
raise ValueError("Each edge has more than 3 indices.")
graph_train = Graph(
self._edge_subgraph_with_isonodes(
self.G,
message_edges,
)
)
graph_train.negative_label_val = self.negative_label_val
graph_train._create_label_link_pred(
graph_train, objective_edges
)
graph_train._is_train = True
return graph_train
def _custom_split_link_pred(self):
r"""
custom support version of split_link_pred
"""
split_num = len(self.general_splits)
split_graph = []
edges_train = self.general_splits[0]
edges_val = self.general_splits[1]
graph_train = Graph(
self._edge_subgraph_with_isonodes(
self.G,
edges_train,
),
disjoint_split=(
self.disjoint_split
),
negative_edges=(
self.negative_edges
)
)
graph_train.negative_label_val = self.negative_label_val
graph_val = copy.copy(graph_train)
if split_num == 3:
edges_test = self.general_splits[2]
graph_test = Graph(
self._edge_subgraph_with_isonodes(
self.G,
edges_train + edges_val
),
negative_edges=(
self.negative_edges
)
)
graph_test.negative_label_val = self.negative_label_val
graph_train._create_label_link_pred(
graph_train, edges_train
)
graph_val._create_label_link_pred(
graph_val, edges_val
)
if split_num == 3:
graph_test._create_label_link_pred(
graph_test, edges_test
)
graph_train._is_train = True
split_graph.append(graph_train)
split_graph.append(graph_val)
if split_num == 3:
split_graph.append(graph_test)
return split_graph
def split_link_pred(
self,
split_ratio: Union[float, List[float]],
shuffle: bool = True
):
r"""
Split the graph into `len(split_ratio)` graphs for
        the link prediction task. Internally this function splits the edge indices, and
        the model will only compute loss for the node embeddings in each split graph.
This is only used for the transductive link prediction task.
In this task, different parts of the graph are observed in train / val / test.
        During training, the training graph might be further split into
        message edges and supervision edges.
.. note::
            This function will be called twice.
Args:
split_ratio (float or list): The ratio or list of ratios.
shuffle (bool): Whether to shuffle for the splitting.
Returns:
list: A list of :class:`Graph` objects.
"""
if isinstance(split_ratio, float):
split_ratio = [split_ratio, 1 - split_ratio]
if len(split_ratio) < 2 or len(split_ratio) > 3:
raise ValueError("Unrecoginzed number of splits")
if self.num_edges < len(split_ratio):
raise ValueError(
"In _split_link_pred num of edges are smaller than"
"number of splitted parts."
)
if self.G is not None:
edges = list(self.G.edges(data=True))
if shuffle:
random.shuffle(edges)
else:
if shuffle:
edges = torch.randperm(self.num_edges)
else:
edges = torch.arange(self.num_edges)
        # Perform `secure split` to guarantee that every split subgraph
        # contains at least one edge.
if len(split_ratio) == 2:
num_edges_train = int(split_ratio[0] * self.num_edges)
num_edges_val = self.num_edges - num_edges_train
if (
(num_edges_train == 0)
or (num_edges_val == 0)
):
num_edges_train = (
1 + int(split_ratio[0] * (self.num_edges - 2))
)
edges_train = edges[:num_edges_train]
edges_val = edges[num_edges_train:]
elif len(split_ratio) == 3:
num_edges_train = int(split_ratio[0] * self.num_edges)
num_edges_val = int(split_ratio[1] * self.num_edges)
num_edges_test = self.num_edges - num_edges_train - num_edges_val
if (
(num_edges_train == 0)
or (num_edges_val == 0)
or (num_edges_test == 0)
):
num_edges_train = (
1 + int(split_ratio[0] * (self.num_edges - 3))
)
num_edges_val = 1 + int(split_ratio[1] * (self.num_edges - 3))
edges_train = edges[:num_edges_train]
edges_val = edges[num_edges_train:num_edges_train + num_edges_val]
edges_test = edges[num_edges_train + num_edges_val:]
if self.G is not None:
graph_train = Graph(
self._edge_subgraph_with_isonodes(self.G, edges_train)
)
if hasattr(self, "negative_label_val"):
graph_train.negative_label_val = self.negative_label_val
else:
graph_train = copy.copy(self)
# update the edge_index
edge_index = torch.index_select(
self.edge_index, 1, edges_train
)
if self.is_undirected():
edge_index = torch.cat(
[edge_index, torch.flip(edge_index, [0])], dim=1
)
# update edge features
graph_train.edge_index = edge_index
for key in graph_train.keys:
if self._is_edge_attribute(key):
edge_feature = torch.index_select(
self[key], 0, edges_train
)
if self.is_undirected():
edge_feature = torch.cat(
[edge_feature, edge_feature], dim=0
)
graph_train[key] = edge_feature
# in tensor backend, store the original self.edge_label
graph_train._edge_label = copy.deepcopy(self.edge_label)
graph_val = copy.copy(graph_train)
if len(split_ratio) == 3:
if self.G is not None:
graph_test = Graph(
self._edge_subgraph_with_isonodes(
self.G, edges_train + edges_val
)
)
if hasattr(self, "negative_label_val"):
graph_test.negative_label_val = self.negative_label_val
else:
graph_test = copy.copy(self)
edge_index = torch.index_select(
self.edge_index, 1, torch.cat([edges_train, edges_val])
)
if self.is_undirected():
edge_index = torch.cat(
[edge_index, torch.flip(edge_index, [0])],
dim=1
)
graph_test.edge_index = edge_index
for key in graph_test.keys:
if self._is_edge_attribute(key):
edge_feature = torch.index_select(
self[key], 0, torch.cat([edges_train, edges_val])
)
if self.is_undirected():
edge_feature = torch.cat(
[edge_feature, edge_feature], dim=0
)
graph_test[key] = edge_feature
self._create_label_link_pred(graph_train, edges_train)
self._create_label_link_pred(graph_val, edges_val)
graph_train._is_train = True
if len(split_ratio) == 3:
self._create_label_link_pred(graph_test, edges_test)
return [graph_train, graph_val, graph_test]
else:
return [graph_train, graph_val]
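    # A minimal usage sketch for split_link_pred (illustrative names; `graph` is a
    # Graph prepared for a link prediction task):
    #
    #   train_g, val_g, test_g = graph.split_link_pred([0.8, 0.1, 0.1])
    #   # train_g keeps only the training (message) edges in its structure,
    #   # while edge_label_index / edge_label hold the supervision edges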
def _edge_subgraph_with_isonodes(self, G, edges):
r"""
        Generate a new networkx graph with the same nodes and their attributes.
Preserves all nodes and a subset of edges. Nodes that are not connected
by any of the edges will be isolated nodes instead of being removed.
Note:
edges should be list(G_i.edges(data=True))
"""
G_new = G.__class__()
G_new.add_nodes_from(G.nodes(data=True))
G_new.add_edges_from(edges)
return G_new
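    # A small illustration with plain NetworkX (toy graph; names illustrative):
    #
    #   G = networkx.Graph([(0, 1), (1, 2)])
    #   G_sub = graph._edge_subgraph_with_isonodes(G, [(0, 1, {})])
    #   # G_sub keeps nodes {0, 1, 2} but only the edge (0, 1); node 2 stays isolated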
def resample_disjoint(self, message_ratio: float):
r"""
Resample splits of the message passing and supervision edges in the
`disjoint` mode.
.. note::
If :meth:`apply_transform` (on the message passing graph)
was used before this resampling, it needs to be
re-applied after resampling, to update some of the (supervision)
edges that were in the objectives.
Args:
            message_ratio (float): Split ratio between message passing and supervision edges.
"""
if not hasattr(self, "_objective_edges"):
raise ValueError("No disjoint edge split was performed.")
# Combine into 1 graph
if not hasattr(self, "_resample_disjoint_idx"):
self._resample_disjoint_idx = 0
resample_disjoint_period = self.resample_disjoint_period
if self._resample_disjoint_idx == (resample_disjoint_period - 1):
if self.G is not None:
graph = self
graph.G.add_edges_from(self._objective_edges)
else:
graph = copy.deepcopy(self)
edge_index = graph.edge_index[:, 0:self.num_edges]
# recover full edge_index
edge_index = torch.cat(
[edge_index, graph._objective_edges], dim=1
)
if graph.is_undirected():
edge_index = torch.cat(
[edge_index, torch.flip(edge_index, [0])], dim=1
)
graph.edge_index = edge_index
# recover full edge attributes
for key in graph._objective_edges_attribute:
if graph._is_edge_attribute(key):
graph[key] = torch.cat(
[
graph[key],
graph._objective_edges_attribute[key]
],
dim=0
)
if graph.is_undirected():
graph[key] = torch.cat(
[graph[key], graph[key]], dim=0
)
graph.edge_label = graph._edge_label
graph = graph.split_link_pred(message_ratio)[1]
graph._is_train = True
graph._resample_disjoint_flag = True
else:
graph = self
graph._resample_disjoint_flag = False
graph._resample_disjoint_idx = (
(self._resample_disjoint_idx + 1)
% resample_disjoint_period
)
return graph
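    # A minimal usage sketch for resample_disjoint (assumes `train_g` came from a
    # disjoint link prediction split and has `resample_disjoint_period` set):
    #
    #   train_g = train_g.resample_disjoint(message_ratio=0.8)
    #   # every `resample_disjoint_period`-th call redraws the message /
    #   # supervision edge split; otherwise the same graph is returned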
def _create_label_link_pred(self, graph, edges):
r"""
Create edge label and the corresponding label_index (edges)
for link prediction.
Modifies the graph argument by setting the fields edge_label_index
and edge_label.
        Note that when the graph uses the tensor backend, `edges` contains
        edge indices rather than edge tuples.
"""
if self.G is not None:
graph.edge_label_index = self._edge_to_index(edges)
graph.edge_label = (
self._get_edge_attributes_by_key(edges, "edge_label")
)
# Keep a copy of original edges (and their attributes)
# for resampling the disjoint split
# (message passing and objective links)
graph._objective_edges = edges
else:
edge_label_index = torch.index_select(
self.edge_index, 1, edges
)
# store objective edges
graph._objective_edges = copy.deepcopy(edge_label_index)
if self.is_undirected():
edge_label_index = torch.cat(
[edge_label_index, torch.flip(edge_label_index, [0])],
dim=1
)
graph.edge_label_index = edge_label_index
graph.edge_label = (
self._get_edge_attributes_by_key_tensor(edges, "edge_label")
)
# store objective edge attributes
objective_edges_attribute = {}
for key in graph.keys:
if self._is_edge_attribute(key) and (key != "edge_label"):
edge_attribute = torch.index_select(
self[key], 0, edges
)
objective_edges_attribute[key] = edge_attribute
graph._objective_edges_attribute = objective_edges_attribute
def _custom_create_neg_sampling(
self, negative_sampling_ratio: float, resample: bool = False
):
r"""
custom support version of _create_neg_sampling where negative edges
are provided as self.negative_edge
Args:
negative_sampling_ratio (float or int): ratio of negative sampling
edges compared with the original edges.
            resample (bool): Whether to resample.
"""
if resample and self._num_positive_examples is not None:
self.edge_label_index = self.edge_label_index[
:, :self._num_positive_examples
]
num_pos_edges = self.edge_label_index.shape[-1]
num_neg_edges = int(num_pos_edges * negative_sampling_ratio)
if self.edge_index.size() == self.edge_label_index.size() and (
torch.sum(self.edge_index - self.edge_label_index) == 0
):
# (train in 'all' mode)
edge_index_all = self.edge_index
else:
edge_index_all = (
torch.cat([self.edge_index, self.edge_label_index], -1)
)
if len(edge_index_all) > 0:
if not torch.is_tensor(self.negative_edge):
negative_edges_length = len(self.negative_edge)
if negative_edges_length < num_neg_edges:
multiplicity = math.ceil(
num_neg_edges / negative_edges_length
)
self.negative_edge = self.negative_edge * multiplicity
self.negative_edge = self.negative_edge[:num_neg_edges]
self.negative_edge = torch.tensor(
list(zip(*self.negative_edge))
)
if not hasattr(self, "_negative_edge_idx"):
self._negative_edge_idx = 0
negative_edges = self.negative_edge
negative_edges_length = negative_edges.shape[1]
if self._negative_edge_idx + num_neg_edges > negative_edges_length:
negative_edges_begin = (
negative_edges[:, self._negative_edge_idx:]
)
negative_edges_end = negative_edges[
:, :self._negative_edge_idx
+ num_neg_edges - negative_edges_length
]
negative_edges = torch.cat(
[negative_edges_begin, negative_edges_end], axis=1
)
else:
negative_edges = negative_edges[
:, self._negative_edge_idx:
self._negative_edge_idx + num_neg_edges
]
self._negative_edge_idx = (
(self._negative_edge_idx + num_neg_edges)
% negative_edges_length
)
else:
return torch.tensor([], dtype=torch.long)
if not resample:
if self.edge_label is None:
# if label is not yet specified, use all ones for positives
positive_label = torch.ones(num_pos_edges, dtype=torch.long)
                # if label is not yet specified, use all zeros for negatives
negative_label = torch.zeros(num_neg_edges, dtype=torch.long)
else:
# if label is specified, use max(positive_label) + 1
# for negative labels
positive_label = self.edge_label
negative_label_val = self.negative_label_val
negative_label = (
negative_label_val
* torch.ones(num_neg_edges, dtype=torch.long)
)
self.edge_label = (
torch.cat(
[positive_label, negative_label], -1
).type(torch.long)
)
# append to edge_label_index
self.edge_label_index = (
torch.cat([self.edge_label_index, negative_edges], -1)
)
self._num_positive_examples = num_pos_edges
def _create_neg_sampling(
self, negative_sampling_ratio: float, resample: bool = False
):
r"""
Create negative samples for link prediction,
and changes the edge_label and edge_label_index accordingly
(if already existed).
Simplest link prediction has no label. It will be treated as
binary classification.
edge_label will be set to 1 for positives and 0 for negative examples.
For link prediction that requires prediction of edge type,
it will be a multi-class classification task.
        edge_label keeps the original labels for positive examples, and
        negative examples are labeled with negative_label_val
        (the maximum positive label + 1).
Hence the number of prediction classes will be incremented by 1.
In this case dataset.num_edge_labels should be called after split
(which calls this function).
Args:
negative_sampling_ratio (float or int): ratio of negative sampling
edges compared with the original edges.
            resample (bool): Whether to resample.
"""
if resample and self._num_positive_examples is not None:
# remove previous negative samples first
# if self._num_positive_examples is None then
# no previous sampling was done
self.edge_label_index = self.edge_label_index[
:, :self._num_positive_examples
]
num_pos_edges = self.edge_label_index.shape[-1]
num_neg_edges = int(num_pos_edges * negative_sampling_ratio)
if self.edge_index.size() == self.edge_label_index.size() and (
torch.sum(self.edge_index - self.edge_label_index) == 0
):
# (train in 'all' mode)
edge_index_all = self.edge_index
else:
edge_index_all = (
torch.cat([self.edge_index, self.edge_label_index], -1)
)
# handle multigraph
if hasattr(self, "_edge_index_all"):
if not torch.equal(self._edge_index_all, edge_index_all):
edge_index_all_unique = torch.unique(edge_index_all, dim=1)
else:
edge_index_all_unique = self._edge_index_all_unique
else:
edge_index_all_unique = torch.unique(edge_index_all, dim=1)
self._edge_index_all = edge_index_all
self._edge_index_all_unique = edge_index_all_unique
negative_edges = self.negative_sampling(
edge_index_all_unique, self.num_nodes, num_neg_edges
)
if not resample:
if self.edge_label is None:
# if label is not yet specified, use all ones for positives
positive_label = torch.ones(num_pos_edges, dtype=torch.long)
                # if label is not yet specified, use all zeros for negatives
negative_label = torch.zeros(num_neg_edges, dtype=torch.long)
else:
positive_label = self.edge_label
negative_label_val = self.negative_label_val
negative_label = (
negative_label_val
* torch.ones(num_neg_edges, dtype=torch.long)
)
self.edge_label = (
torch.cat(
[positive_label, negative_label], -1
).type(torch.long)
)
# append negative edges to edge_label_index
self.edge_label_index = (
torch.cat([self.edge_label_index, negative_edges], -1)
)
self._num_positive_examples = num_pos_edges
@staticmethod
def add_node_attr(G, attr_name: str, node_attr):
r"""
Add node attribute into a NetworkX graph. Assume that the
`node_attr` ordering is the same as the node ordering in `G`.
Args:
G (NetworkX Graph): A NetworkX graph.
attr_name (str): Name of the node attribute to set.
node_attr (array_like): Corresponding node attributes.
"""
# TODO: Better method here?
node_list = list(G.nodes)
attr_dict = dict(zip(node_list, node_attr))
deepsnap._netlib.set_node_attributes(G, attr_dict, name=attr_name)
@staticmethod
def add_edge_attr(G, attr_name: str, edge_attr):
r"""
Add edge attribute into a NetworkX graph.
Args:
G (NetworkX Graph): A NetworkX graph.
attr_name (str): Name of the edge attribute to set.
edge_attr (array_like): Corresponding edge attributes.
"""
# TODO: parallel?
edge_list = list(G.edges)
attr_dict = dict(zip(edge_list, edge_attr))
deepsnap._netlib.set_edge_attributes(G, attr_dict, name=attr_name)
@staticmethod
def add_graph_attr(G, attr_name: str, graph_attr):
r"""
Add graph attribute into a NetworkX graph.
Args:
G (NetworkX Graph): A NetworkX graph.
attr_name (str): Name of the graph attribute to set.
graph_attr (scalar or array_like): Corresponding
graph attributes.
"""
G.graph[attr_name] = graph_attr
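    # A small sketch of the attribute helpers above (toy NetworkX graph;
    # any array-like of matching length works for node / edge attributes):
    #
    #   G = networkx.path_graph(3)                          # 3 nodes, 2 edges
    #   Graph.add_node_attr(G, "node_feature", [torch.rand(4) for _ in range(3)])
    #   Graph.add_edge_attr(G, "edge_feature", [torch.rand(4) for _ in range(2)])
    #   Graph.add_graph_attr(G, "graph_label", torch.tensor([1]))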
@staticmethod
def pyg_to_graph(
data,
verbose: bool = False,
fixed_split: bool = False,
tensor_backend: bool = False,
netlib=None
):
r"""
Transform a :class:`torch_geometric.data.Data` object to a
:class:`Graph` object.
Args:
data (:class:`torch_geometric.data.Data`): A
:class:`torch_geometric.data.Data` object that will be
                transformed to a :class:`deepsnap.graph.Graph`
object.
verbose (bool): Whether to print information such as warnings.
fixed_split (bool): Whether to load the fixed data split from
the original PyTorch Geometric data.
tensor_backend (bool): `True` will use pure tensors for graphs.
netlib (types.ModuleType, optional): The graph backend module.
                Currently DeepSNAP supports NetworkX and SnapX (SnapX only
                supports undirected homogeneous graphs) as graph backends.
                The default graph backend is NetworkX.
Returns:
:class:`Graph`: A new DeepSNAP :class:`Graph` object.
"""
# all fields in PyG Data object
kwargs = {}
kwargs["node_feature"] = data.x if "x" in data.keys else None
kwargs["edge_feature"] = (
data.edge_attr if "edge_attr" in data.keys else None
)
kwargs["node_label"], kwargs["edge_label"] = None, None
kwargs["graph_feature"], kwargs["graph_label"] = None, None
if kwargs["node_feature"] is not None and data.y.size(0) == kwargs[
"node_feature"
].size(0):
kwargs["node_label"] = data.y
elif kwargs["edge_feature"] is not None and data.y.size(0) == kwargs[
"edge_feature"
].size(0):
kwargs["edge_label"] = data.y
else:
kwargs["graph_label"] = data.y
if not tensor_backend:
if netlib is not None:
deepsnap._netlib = netlib
if data.is_directed():
G = deepsnap._netlib.DiGraph()
else:
G = deepsnap._netlib.Graph()
G.add_nodes_from(range(data.num_nodes))
G.add_edges_from(data.edge_index.T.tolist())
else:
attributes = {}
if not data.is_directed():
row, col = data.edge_index
mask = row < col
row, col = row[mask], col[mask]
edge_index = torch.stack([row, col], dim=0)
edge_index = torch.cat(
[edge_index, torch.flip(edge_index, [0])],
dim=1
)
else:
edge_index = data.edge_index
attributes["edge_index"] = edge_index
# include other arguments that are in the kwargs of pyg data object
keys_processed = ["x", "y", "edge_index", "edge_attr"]
for key in data.keys:
if key not in keys_processed:
kwargs[key] = data[key]
# we assume that edge-related and node-related features are defined
# the same as in Graph._is_edge_attribute and Graph._is_node_attribute
for key, value in kwargs.items():
if value is None:
continue
if Graph._is_node_attribute(key):
if not tensor_backend:
Graph.add_node_attr(G, key, value)
else:
attributes[key] = value
elif Graph._is_edge_attribute(key):
# TODO: make sure the indices of edge attributes are same with edge_index
if not tensor_backend:
# the order of edge attributes is consistent
# with edge index
Graph.add_edge_attr(G, key, value)
else:
attributes[key] = value
elif Graph._is_graph_attribute(key):
if not tensor_backend:
Graph.add_graph_attr(G, key, value)
else:
attributes[key] = value
else:
if verbose:
print(f"Index fields: {key} ignored.")
if fixed_split:
masks = ["train_mask", "val_mask", "test_mask"]
if not tensor_backend:
graph = Graph(G, netlib=netlib)
else:
graph = Graph(**attributes)
if graph.edge_label is not None:
graph.negative_label_val = torch.max(graph.edge_label) + 1
graphs = []
for mask in masks:
if mask in kwargs:
graph_new = copy.copy(graph)
graph_new.node_label_index = (
torch.nonzero(data[mask]).squeeze()
)
graph_new.node_label = (
graph_new.node_label[graph_new.node_label_index]
)
graphs.append(graph_new)
return graphs
else:
if not tensor_backend:
return Graph(G, netlib=netlib)
else:
graph = Graph(**attributes)
if graph.edge_label is not None:
graph.negative_label_val = torch.max(graph.edge_label) + 1
return graph
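    # A minimal conversion sketch (assumes PyTorch Geometric is installed;
    # the Planetoid "Cora" dataset is only an illustration):
    #
    #   from torch_geometric.datasets import Planetoid
    #   pyg_data = Planetoid("./cora", "Cora")[0]
    #   dsnap_graph = Graph.pyg_to_graph(pyg_data)
    #   train_g, val_g, test_g = Graph.pyg_to_graph(pyg_data, fixed_split=True)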
@staticmethod
def raw_to_graph(data):
r"""
        Override this method to import a custom data format, and
        make sure all attributes of G are scalars or :class:`torch.Tensor`.
``Not implemented``
"""
raise NotImplementedError
@staticmethod
def negative_sampling(edge_index, num_nodes: int, num_neg_samples: int):
r"""
        Samples random negative edges for a graph given
        by :attr:`edge_index`.
Args:
edge_index (LongTensor): The indices for edges.
num_nodes (int): Number of nodes.
num_neg_samples (int): The number of negative samples to
return.
Returns:
:class:`torch.LongTensor`: The :attr:`edge_index` tensor
for negative edges.
"""
num_neg_samples_available = min(
num_neg_samples, num_nodes * num_nodes - edge_index.shape[1]
)
if num_neg_samples_available == 0:
raise ValueError(
"No negative samples could be generated for a complete graph."
)
rng = range(num_nodes ** 2)
# idx = N * i + j
idx = (edge_index[0] * num_nodes + edge_index[1]).to("cpu")
perm = torch.tensor(random.sample(rng, num_neg_samples_available))
mask = torch.from_numpy(np.isin(perm, idx)).to(torch.bool)
rest = torch.nonzero(mask).view(-1)
while rest.numel() > 0: # pragma: no cover
tmp = torch.tensor(random.sample(rng, rest.size(0)))
mask = torch.from_numpy(np.isin(tmp, idx)).to(torch.bool)
perm[rest] = tmp
rest = rest[torch.nonzero(mask).view(-1)]
row = perm // num_nodes
col = perm % num_nodes
neg_edge_index = torch.stack([row, col], dim=0).long()
if num_neg_samples_available < num_neg_samples:
multiplicity = math.ceil(
num_neg_samples / num_neg_samples_available
)
neg_edge_index = torch.cat([neg_edge_index] * multiplicity, dim=1)
neg_edge_index = neg_edge_index[:, :num_neg_samples]
return neg_edge_index.to(edge_index.device)
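    # A quick sketch of negative sampling on a toy edge_index (illustrative):
    #
    #   edge_index = torch.tensor([[0, 1], [1, 2]])   # edges 0 -> 1 and 1 -> 2
    #   neg = Graph.negative_sampling(edge_index, num_nodes=3, num_neg_samples=4)
    #   # neg is a 2 x 4 LongTensor of sampled node pairs; pairs already in
    #   # edge_index are rejected, and samples repeat if too few are available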
|
python/floor-ceil-and-rint.py | gajubadge11/HackerRank-1 | 340 | 11073916 | <reponame>gajubadge11/HackerRank-1
import numpy as np
array = np.array(list(input().strip().split()), dtype = float)
print(np.floor(array))
print(np.ceil(array))
print(np.rint(array)) |
tests/test_utils.py | gaurav17467/free-python-games | 2,478 | 11073929 | <filename>tests/test_utils.py
from pytest import raises
import freegames.utils as utils
def test_change_after_hash():
v = utils.vector(0, 0)
hash(v)
with raises(ValueError):
v.x = 1
with raises(ValueError):
v.y = 1
with raises(ValueError):
v += 1
with raises(ValueError):
v -= 1
with raises(ValueError):
v *= 2
with raises(ValueError):
v /= 2
with raises(ValueError):
v.rotate(90)
def test_not_implemented_paths():
v = utils.vector(0, 0)
assert not (v == 0)
assert v != 0
|
demos/feature_demo.py | cuongptnk/imutils | 3,984 | 11073994 | <filename>demos/feature_demo.py
# author: <NAME>
# website: http://www.pyimagesearch.com
# USAGE
# python feature_demo.py
# import the necessary packages
from imutils.feature import DescriptorExtractor_create
from imutils.feature import FeatureDetector_create
from imutils.feature import corners_to_keypoints
# ensure the keypoint detection and local invariant descriptors are
# working properly
detector = FeatureDetector_create("SIFT")
extractor = DescriptorExtractor_create("SIFT")
print(detector)
print(extractor)
print(corners_to_keypoints) |
tests/components/plant/__init__.py | domwillcode/home-assistant | 30,023 | 11074008 | """Tests for the plant component."""
|
optimus/engines/base/dask/functions.py | ironmussa/Optimus | 1,045 | 11074014 | <reponame>ironmussa/Optimus<filename>optimus/engines/base/dask/functions.py<gh_stars>1000+
from abc import abstractmethod
import dask
import dask.dataframe as dd
import hiurlparser
import numpy as np
from dask.delayed import delayed
from dask_ml.preprocessing import MinMaxScaler, StandardScaler
from sklearn.preprocessing import MaxAbsScaler
from dask_ml.impute import SimpleImputer
from optimus.engines.base.distributed.functions import DistributedBaseFunctions
from optimus.helpers.core import one_tuple_to_val
from optimus.helpers.logger import logger
class DaskBaseFunctions(DistributedBaseFunctions):
_engine = dask
    @property
    def _functions(self):
        return dd
@staticmethod
def _new_series(series, *args, **kwargs):
if isinstance(series, dd.Series):
return series
return dd.from_array(series, *args, **kwargs)
@staticmethod
def compute(*args, **kwargs):
result = dask.compute(*(*(a for a in args), *(kwargs[k] for k in kwargs)))
return one_tuple_to_val(result)
@abstractmethod
def from_dataframe(self, dfd):
pass
@staticmethod
def to_dataframe(dfd):
return dfd.compute()
@staticmethod
def df_concat(df_list):
return dd.concat(df_list, axis=0)
def new_df(self, *args, **kwargs):
if len(args) < 4 and all([k not in kwargs for k in ['dsk', 'name', 'meta', 'divisions']]):
return self.from_dataframe(self._partition_engine.DataFrame(*args, **kwargs))
return dd.DataFrame(*args, **kwargs)
@staticmethod
def dask_to_compatible(dfd):
return dfd
def sort_df(self, dfd, cols, ascending):
for c, a in list(zip(cols, ascending))[::-1]:
dfd = dfd.sort_values(c)
if not a:
dfd = self.reverse_df(dfd)
return dfd.reset_index(drop=True)
def reverse_df(self, dfd):
@delayed
def reverse_pdf(pdf):
return pdf[::-1]
ds = dfd.to_delayed()
ds = [reverse_pdf(d) for d in ds][::-1]
return dd.from_delayed(ds)
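    # sketch of the effect (assumes `funcs` is an instance of a concrete subclass
    # and pandas is imported as pd):
    #
    #   ddf = dd.from_pandas(pd.DataFrame({"a": [1, 2, 3, 4]}), npartitions=2)
    #   funcs.reverse_df(ddf).compute()["a"].tolist()   # -> [4, 3, 2, 1]
    #   # each pandas partition is reversed and the partition order is flipped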
def to_float(self, series):
if getattr(series, "map_partitions", False):
return self.map_partitions(series, self._to_float)
else:
return self._to_float(series)
def to_integer(self, series, default=0):
if getattr(series, "map_partitions", False):
return self.map_partitions(series, self._to_integer, default=default)
else:
return self._to_integer(series, default=default)
def to_datetime(self, series):
if getattr(series, "map_partitions", False):
return self.map_partitions(series, self._to_datetime)
else:
return self._to_float(series)
def duplicated(self, dfd, keep, subset):
return self.from_dataframe(self.to_dataframe(dfd).duplicated(keep=keep, subset=subset))
def impute(self, series, strategy, fill_value):
imputer = SimpleImputer(strategy=strategy, fill_value=fill_value)
series_fit = series.dropna()
if str(series.dtype) in self.constants.OBJECT_TYPES:
series_fit = series_fit.astype(str)
values = series_fit.values.reshape(-1, 1)
if len(values):
imputer.fit(values)
return imputer.transform(series.fillna(np.nan).values.reshape(-1, 1))
else:
logger.warn("list to fit imputer is empty, try cols.fill_na instead.")
return series
@staticmethod
def delayed(func):
def wrapper(*args, **kwargs):
return dask.delayed(func)(*args, **kwargs)
return wrapper
def from_delayed(self, delayed):
return dd.from_delayed(delayed)
def to_delayed(self, value):
return value.to_delayed()
def mode(self, series):
# Uses delayed wrapper to avoid unexpected Dask errors
# TODO should this be done in every static function called from Dask instead?
@self.delayed
def compute_mode(series):
# dfd = series.rename(0).value_counts().reset_index(drop=False)
# _max = dfd[0].max(skipna=True)
# return dfd[dfd[0] == _max]['index'].rename(series.name)
return series.mode()
return compute_mode(series)
def count_zeros(self, series):
return int((self.to_float(series).values == 0).sum())
def standard_scaler(self, series):
dfd = StandardScaler().fit_transform(self.to_float(series).to_frame())
return dfd[dfd.columns[0]]
def max_abs_scaler(self, series):
return MaxAbsScaler().fit_transform(self.compute(self.to_float(series)).values.reshape(-1, 1))
def min_max_scaler(self, series):
dfd = MinMaxScaler().fit_transform(self.to_float(series).to_frame())
return dfd[dfd.columns[0]]
# @staticmethod
# def heatmap(df, bins):
# counts, edges = da.histogramdd((df['x'], df['y'].values), bins=bins)
# return counts, edges[0], edges[1]
def domain(self, series):
return self.to_string(series).map(lambda v: hiurlparser.parse_url(v)["domain"], na_action=None,
meta=(series.name, "str"))
def top_domain(self, series):
return self.to_string(series).map(lambda v: hiurlparser.parse_url(v)["top_domain"], na_action=None,
meta=(series.name, "str"))
def sub_domain(self, series):
return self.to_string(series).map(lambda v: hiurlparser.parse_url(v)["sub_domain"], na_action=None,
meta=(series.name, "str"))
def url_scheme(self, series):
return self.to_string(series).map(lambda v: hiurlparser.parse_url(v)["protocol"], na_action=None,
meta=(series.name, "str"))
def url_path(self, series):
return self.to_string(series).map(lambda v: hiurlparser.parse_url(v)["path"], na_action=None,
meta=(series.name, "str"))
def url_file(self, series):
return self.to_string(series).map(lambda v: hiurlparser.parse_url(v)["file"], na_action=None,
meta=(series.name, "str"))
def url_query(self, series):
return self.to_string(series).map(lambda v: hiurlparser.parse_url(v)["query"], na_action=None,
meta=(series.name, "str"))
def url_fragment(self, series):
return self.to_string(series).map(lambda v: hiurlparser.parse_url(v)["fragment"], na_action=None,
meta=(series.name, "str"))
def host(self, series):
return self.to_string(series).map(lambda v: hiurlparser.parse_url(v)["host"], na_action=None,
meta=(series.name, "str"))
def port(self, series):
return self.to_string(series).map(lambda v: hiurlparser.parse_url(v)["port"], na_action=None,
meta=(series.name, "str"))
|
python/paddle/optimizer/lamb.py | wwqgtxx/Paddle | 17,085 | 11074080 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .optimizer import Optimizer
from ..fluid import core
from ..fluid import framework
from ..fluid.framework import Variable
from paddle import _C_ops
__all__ = []
class Lamb(Optimizer):
r"""
LAMB (Layer-wise Adaptive Moments optimizer for Batching training) Optimizer.
LAMB Optimizer is designed to scale up the batch size of training without losing
accuracy, which supports adaptive element-wise updating and accurate layer-wise
correction. For more information, please refer to `Large Batch Optimization for
Deep Learning: Training BERT in 76 minutes <https://arxiv.org/abs/1904.00962>`_ .
The updating of parameters follows:
.. math::
m_t &= \beta_1 m_{t - 1}+ (1 - \beta_1)g_t
v_t &= \beta_2 v_{t - 1} + (1 - \beta_2)g_t^2
m_t &= \frac{m_t}{\beta_1^t}
v_t &= \frac{v_t}{\beta_2^t}
r_t &= \frac{m_t}{\sqrt{v_t}+\epsilon}
w_t &= w_{t-1} -\eta_t \frac{\left \| w_{t-1}\right \|}{\left \| r_t + \lambda w_{t-1}\right \|} (r_t + \lambda w_{t-1})
    where :math:`m` is the 1st moment, :math:`v` the 2nd moment, :math:`\eta` the
    learning rate, and :math:`\lambda` the LAMB weight decay rate.
Args:
learning_rate (float|Variable, optional): the learning rate used to update parameters. \
Can be a float value or a Variable with data type float32. Default 0.001.
        lamb_weight_decay (float, optional): The LAMB weight decay rate. Default 0.01. Note that weight_decay should be None.
beta1 (float, optional): The exponential decay rate for the 1st moment estimates.
Default 0.9.
beta2 (float, optional): The exponential decay rate for the 2nd moment estimates.
Default 0.999.
epsilon (float, optional): A small float value for numerical stability. Default 1e-6.
parameters (Iterable, optional): Iterable of ``Variable`` names to update to minimize ``loss``. \
This parameter is required in dygraph mode. And you can specify different options for \
different parameter groups such as the learning rate, weight decay, etc, \
            then the parameters are a list of dicts. Note that the learning_rate in parameter groups \
represents the scale of base learning_rate. \
The default value is None in static mode, at this time all parameters will be updated.
        grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of
            some derived class of ``GradientClipBase`` . There are three clipping strategies
( :ref:`api_paddle_fluid_clip_ClipGradByGlobalNorm` , :ref:`api_paddle_fluid_clip_ClipGradByNorm` ,
:ref:`api_paddle_fluid_clip_ClipGradByValue` ). If you want better convergence, it is recommended
to use :ref:`api_paddle_fluid_clip_ClipGradByGlobalNorm` . Default None, meaning there is no gradient clipping.
        name (str|None): For detailed information, please refer to
:ref:`api_guide_Name` . Usually name is no need to set and None by default.
Examples:
.. code-block:: python
import paddle
inp = paddle.uniform(shape=[10, 10], dtype='float32', min=-0.1, max=0.1)
linear = paddle.nn.Linear(10, 10)
out = linear(inp)
loss = paddle.mean(out)
beta1 = paddle.to_tensor([0.9], dtype="float32")
beta2 = paddle.to_tensor([0.85], dtype="float32")
lamb = paddle.optimizer.Lamb(learning_rate=0.002, parameters=linear.parameters(), lamb_weight_decay=0.01)
back = out.backward()
lamb.step()
lamb.clear_grad()
"""
_moment1_acc_str = "moment1"
_moment2_acc_str = "moment2"
_beta1_pow_acc_str = "beta1_pow_acc"
_beta2_pow_acc_str = "beta2_pow_acc"
def __init__(self,
learning_rate=0.001,
lamb_weight_decay=0.01,
beta1=0.9,
beta2=0.999,
epsilon=1e-6,
parameters=None,
grad_clip=None,
exclude_from_weight_decay_fn=None,
name=None):
assert learning_rate is not None
assert beta1 is not None
assert beta2 is not None
assert epsilon is not None
super(Lamb, self).__init__(
learning_rate=learning_rate,
parameters=parameters,
weight_decay=None,
grad_clip=grad_clip,
name=name)
self.type = "lamb"
self._beta1 = beta1
self._beta2 = beta2
self._epsilon = epsilon
self._lamb_weight_decay = lamb_weight_decay
self._exclude_from_weight_decay_fn = exclude_from_weight_decay_fn
self._default_dict = {
'beta1': beta1,
'beta2': beta2,
'epsilon': epsilon,
'lamb_weight_decay': lamb_weight_decay,
'exclude_from_weight_decay_fn': exclude_from_weight_decay_fn,
}
def _create_accumulators(self, block, parameters):
assert isinstance(block, framework.Block)
if isinstance(parameters, dict):
parameters = self._update_param_group(parameters)
# Create accumulator tensors for first and second moments
for p in parameters:
self._add_accumulator(self._moment1_acc_str, p)
self._add_accumulator(self._moment2_acc_str, p)
self._add_accumulator(
name=self._beta1_pow_acc_str,
param=p,
fill_value=0.9 if isinstance(self._beta1, Variable) \
else self._beta1,
shape=[1],
type=core.VarDesc.VarType.LOD_TENSOR, device='cpu')
self._add_accumulator(
name=self._beta2_pow_acc_str,
param=p,
fill_value=0.999 if isinstance(self._beta2, Variable) \
else self._beta2,
shape=[1],
type=core.VarDesc.VarType.LOD_TENSOR, device='cpu')
def _append_optimize_op(self, block, param_and_grad):
assert isinstance(block, framework.Block)
if isinstance(param_and_grad, dict):
param_and_grad = self._update_param_group(param_and_grad)
block.program._use_lamb = True
moment1 = self._get_accumulator(self._moment1_acc_str,
param_and_grad[0])
moment2 = self._get_accumulator(self._moment2_acc_str,
param_and_grad[0])
beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,
param_and_grad[0])
beta2_pow_acc = self._get_accumulator(self._beta2_pow_acc_str,
param_and_grad[0])
if self._exclude_from_weight_decay_fn is not None \
and self._exclude_from_weight_decay_fn(param_and_grad[0]):
weight_decay = 0.0
else:
weight_decay = self._lamb_weight_decay
lr = self._create_param_lr(param_and_grad)
if framework.in_dygraph_mode():
_, _, _, _, _ = _C_ops.lamb(
param_and_grad[0], param_and_grad[1], lr, moment1, moment2,
beta1_pow_acc, beta2_pow_acc, param_and_grad[0], moment1,
moment2, beta1_pow_acc, beta2_pow_acc, 'beta1', self._beta1,
'beta2', self._beta2, 'epsilon', self._epsilon, 'weight_decay',
weight_decay)
return None
# create the lamb optimize op
inputs = {
"Param": param_and_grad[0],
"Grad": param_and_grad[1],
"LearningRate": lr,
"Moment1": moment1,
"Moment2": moment2,
"Beta1Pow": beta1_pow_acc,
"Beta2Pow": beta2_pow_acc
}
outputs = {
"ParamOut": param_and_grad[0],
"Moment1Out": moment1,
"Moment2Out": moment2,
"Beta1PowOut": beta1_pow_acc,
"Beta2PowOut": beta2_pow_acc
}
attrs = {
"beta1": self._beta1,
"beta2": self._beta2,
"epsilon": self._epsilon,
"weight_decay": weight_decay
}
lamb_op = block.append_op(
type=self.type,
inputs=inputs,
outputs=outputs,
attrs=attrs,
stop_gradient=True)
return lamb_op
def _update_param_group(self, parameters):
self._beta1 = parameters.get('beta1', self._default_dict['beta1'])
self._beta2 = parameters.get('beta2', self._default_dict['beta2'])
self._epsilon = parameters.get('epsilon', self._default_dict['epsilon'])
self._lamb_weight_decay = parameters.get(
'lamb_weight_decay', self._default_dict['lamb_weight_decay'])
self._exclude_from_weight_decay_fn = parameters.get(
'exclude_from_weight_decay_fn',
self._default_dict['exclude_from_weight_decay_fn'])
parameters = parameters.get('params')
return parameters
|
tests/validate/validate_object_test.py | nickgaya/bravado-core | 122 | 11074088 | <reponame>nickgaya/bravado-core
# -*- coding: utf-8 -*-
import pytest
from jsonschema.exceptions import ValidationError
from six import iteritems
from bravado_core.spec import Spec
from bravado_core.validate import validate_object
from tests.conftest import get_url
from tests.validate.conftest import email_address_format
@pytest.fixture
def address_spec():
return {
'type': 'object',
'properties': {
'number': {
'type': 'number',
},
'street_name': {
'type': 'string',
},
'street_type': {
'type': 'string',
'enum': [
'Street',
'Avenue',
'Boulevard',
],
},
},
}
@pytest.fixture
def allOf_spec(address_spec):
return {
'allOf': [
{
'type': 'object',
'properties': {
'business': {
'type': 'string',
},
},
'required': ['business'],
},
address_spec,
],
}
def test_success(minimal_swagger_spec, address_spec):
address = {
'number': 1000,
'street_name': 'Main',
'street_type': 'Street',
}
validate_object(minimal_swagger_spec, address_spec, address)
def test_leaving_out_property_OK(minimal_swagger_spec, address_spec):
address = {
'street_name': 'Main',
'street_type': 'Street',
}
validate_object(minimal_swagger_spec, address_spec, address)
def test_additional_property_OK(minimal_swagger_spec, address_spec):
address = {
'number': 1000,
'street_name': 'Main',
'street_type': 'Street',
'city': 'Swaggerville',
}
validate_object(minimal_swagger_spec, address_spec, address)
def test_required_OK(minimal_swagger_spec, address_spec):
address_spec['required'] = ['number']
address = {
'street_name': 'Main',
'street_type': 'Street',
}
with pytest.raises(ValidationError) as excinfo:
validate_object(minimal_swagger_spec, address_spec, address)
assert 'is a required property' in str(excinfo.value)
def test_property_with_no_schema(minimal_swagger_spec, address_spec):
address = {
'number': 1000,
'street_name': 'Main',
'street_type': 'Street',
}
del address_spec['properties']['street_name']['type']
validate_object(minimal_swagger_spec, address_spec, address)
@pytest.fixture
def email_address_object_spec():
return {
'type': 'object',
'required': ['email_address'],
'properties': {
'email_address': {
'type': 'string',
'format': 'email_address',
},
},
}
def test_user_defined_format_success(minimal_swagger_spec, email_address_object_spec):
request_body = {'email_address': '<EMAIL>'}
minimal_swagger_spec.register_format(email_address_format)
# No exception thrown == success
validate_object(
minimal_swagger_spec,
email_address_object_spec, request_body,
)
def test_user_defined_format_failure(minimal_swagger_spec, email_address_object_spec):
request_body = {'email_address': 'i_am_not_a_valid_email_address'}
minimal_swagger_spec.register_format(email_address_format)
with pytest.raises(ValidationError) as excinfo:
validate_object(
minimal_swagger_spec, email_address_object_spec,
request_body,
)
assert "'i_am_not_a_valid_email_address' is not a 'email_address'" in \
str(excinfo.value)
def test_user_defined_format_sensitive_failure(minimal_swagger_spec, email_address_object_spec):
object_properties = email_address_object_spec['properties']
object_properties['email_address']['x-sensitive'] = True
request_body = {'email_address': 'i_am_not_a_valid_email_address'}
minimal_swagger_spec.register_format(email_address_format)
with pytest.raises(ValidationError) as excinfo:
validate_object(
minimal_swagger_spec, email_address_object_spec,
request_body,
)
assert "'i_am_not_a_valid_email_address'" not in str(excinfo.value)
def test_builtin_format_still_works_when_user_defined_format_used(minimal_swagger_spec):
ipaddress_spec = {
'type': 'object',
'required': ['ipaddress'],
'properties': {
'ipaddress': {
'type': 'string',
'format': 'ipv4',
},
},
}
request_body = {'ipaddress': 'not_an_ip_address'}
minimal_swagger_spec.register_format(email_address_format)
with pytest.raises(ValidationError) as excinfo:
validate_object(minimal_swagger_spec, ipaddress_spec, request_body)
assert "'not_an_ip_address' is not a 'ipv4'" in str(excinfo.value)
def test_recursive_ref_depth_1(recursive_swagger_spec):
validate_object(
recursive_swagger_spec,
{'$ref': '#/definitions/Node'},
{'name': 'foo'},
)
def test_recursive_ref_depth_n(recursive_swagger_spec):
value = {
'name': 'foo',
'child': {
'name': 'bar',
'child': {
'name': 'baz',
},
},
}
validate_object(
recursive_swagger_spec,
{'$ref': '#/definitions/Node'},
value,
)
def test_recursive_ref_depth_n_failure(recursive_swagger_spec):
value = {
'name': 'foo',
'child': {
'name': 'bar',
'child': {
'kaboom': 'baz', # <-- key should be 'name', not 'kabbom'
},
},
}
with pytest.raises(ValidationError) as excinfo:
validate_object(
recursive_swagger_spec,
{'$ref': '#/definitions/Node'},
value,
)
assert "'name' is a required property" in str(excinfo.value)
# x-nullable validation
# ---------------------
# If the value is an object, validation should pass if
# `x-nullable` is `True` and the value is `None`. `required` doesn't
# have an influence.
#
# +---------------------+-------------------------+--------------------------+
# | | required == False | required == True |
# +---------------------+-------------------------+--------------------------+
# | x-nullable == False | {} -> pass (1) | {} -> fail (4) |
# | | {'x': 'y'} -> pass (2) | {'x': 'y'} -> pass (5) |
# | | {'x': None} -> fail (3) | {'x': None} -> fail (6) |
# +---------------------+-------------------------+--------------------------+
# | x-nullable == True | {} -> pass (7) | {} -> fail (10) |
# | | {'x': 'y'} -> pass (8) | {'x': 'y'} -> pass (11) |
# | | {'x': None} -> pass (9) | {'x': None} -> pass (12) |
# +---------------------+-------------------------+--------------------------+
def content_spec_factory(required, nullable):
return {
'type': 'object',
'required': ['x'] if required else [],
'properties': {
'x': {
'type': 'string',
'x-nullable': nullable,
},
},
}
@pytest.mark.parametrize('nullable', [True, False])
@pytest.mark.parametrize('required', [True, False])
def test_nullable_with_value(empty_swagger_spec, nullable, required):
"""With a value set, validation should always pass: (2), (5), (8), (11)"""
content_spec = content_spec_factory(required, nullable)
value = {'x': 'y'}
validate_object(empty_swagger_spec, content_spec, value)
@pytest.mark.parametrize('nullable', [True, False])
def test_nullable_required_no_value(empty_swagger_spec, nullable):
"""When the value is required but not set at all, validation
should fail: (4), (10)
"""
content_spec = content_spec_factory(True, nullable)
value = {}
with pytest.raises(ValidationError) as excinfo:
validate_object(empty_swagger_spec, content_spec, value)
assert "'x' is a required property" in str(excinfo.value.message)
@pytest.mark.parametrize('nullable', [True, False])
def test_nullable_no_value(empty_swagger_spec, nullable):
"""When the value is not required and not set at all, validation
should pass: (1), (7)
"""
content_spec = content_spec_factory(False, nullable=nullable)
value = {}
validate_object(empty_swagger_spec, content_spec, value)
@pytest.mark.parametrize('required', [True, False])
def test_nullable_false_value_none(empty_swagger_spec, required):
"""When nullable is `False` and the value is set to `None`, validation
should fail: (3), (6)
"""
content_spec = content_spec_factory(required, False)
value = {'x': None}
with pytest.raises(ValidationError) as excinfo:
validate_object(empty_swagger_spec, content_spec, value)
assert excinfo.value.message == "None is not of type 'string'"
@pytest.mark.parametrize('required', [True, False])
def test_nullable_none_value(empty_swagger_spec, required):
"""When nullable is `True` and the value is set to `None`, validation
should pass: (9), (12)
"""
content_spec = content_spec_factory(required, True)
value = {'x': None}
validate_object(empty_swagger_spec, content_spec, value)
def test_allOf_minimal(empty_swagger_spec, allOf_spec):
value = {
'business': 'Yelp',
}
validate_object(empty_swagger_spec, allOf_spec, value)
def test_allOf_fails(empty_swagger_spec, allOf_spec):
with pytest.raises(ValidationError) as excinfo:
validate_object(empty_swagger_spec, allOf_spec, {})
assert excinfo.value.message == "'business' is a required property"
def test_allOf_complex(composition_spec):
pongclone_spec = composition_spec.spec_dict['definitions']['pongClone']
value = {
'additionalFeature': 'Badges',
'gameSystem': 'NES',
'pang': 'value',
'releaseDate': 'October',
}
validate_object(composition_spec, pongclone_spec, value)
def test_allOf_complex_failure(composition_spec):
pongclone_spec = composition_spec.spec_dict['definitions']['pongClone']
value = {
'additionalFeature': 'Badges',
'pang': 'value',
'releaseDate': 'October',
}
with pytest.raises(ValidationError) as excinfo:
validate_object(composition_spec, pongclone_spec, value)
assert "'gameSystem' is a required property" in str(excinfo.value.message)
def test_validate_valid_polymorphic_object(polymorphic_spec):
list_of_pets_dict = {
'number_of_pets': 3,
'list': [
{
'name': 'a generic pet name',
'type': 'GenericPet',
},
{
'name': 'a dog name',
'type': 'Dog',
'birth_date': '2017-03-09',
},
{
'name': 'a cat name',
'type': 'Cat',
'color': 'white',
},
],
}
validate_object(
swagger_spec=polymorphic_spec,
object_spec=polymorphic_spec.spec_dict['definitions']['PetList'],
value=list_of_pets_dict,
)
@pytest.mark.parametrize(
'schema_dict, expected_validation_error',
(
[
{
'number_of_pets': 1,
'list': [
{
'name': 'a cat name',
'type': 'Dog',
'color': 'white',
},
],
},
'\'birth_date\' is a required property',
],
[
{
'number_of_pets': 1,
'list': [
{
'name': 'any string',
'type': 'a not defined type',
},
],
},
'\'a not defined type\' is not a recognized schema',
],
[
{
'number_of_pets': 1,
'list': [
{
'name': 'a bird name',
'type': 'Bird',
},
],
},
'discriminated schema \'Bird\' must inherit from \'GenericPet\'',
],
[
{
'number_of_pets': 1,
'list': [
{
'name': 'a whale name',
'type': 'Whale',
'weight': 1000,
},
],
},
'discriminated schema \'Whale\' must inherit from \'GenericPet\'',
],
),
)
def test_validate_invalid_polymorphic_object(polymorphic_spec, schema_dict, expected_validation_error):
with pytest.raises(ValidationError) as e:
validate_object(
swagger_spec=polymorphic_spec,
object_spec=polymorphic_spec.spec_dict['definitions']['PetList'],
value=schema_dict,
)
assert expected_validation_error in str(e.value.message)
def test_validate_invalid_polymorphic_does_not_alter_validation_paths(polymorphic_spec):
dog_dict = {
'name': 'This is a dog name',
'type': 'Dog',
# 'birth_date' this is intentionally removed in order to trigger a validation error
}
with pytest.raises(ValidationError) as excinfo:
validate_object(
swagger_spec=polymorphic_spec,
object_spec=polymorphic_spec.definitions['GenericPet']._model_spec,
value=dog_dict,
)
validation_error = excinfo.value
assert validation_error.validator == 'required'
assert validation_error.validator_value == ['birth_date']
# as birth_date is defined on the 2nd item of the Dog allOf list the expected schema path should be allOf/1/required
assert list(validation_error.schema_path) == ['allOf', 1, 'required']
@pytest.mark.parametrize('internally_dereference_refs', [True, False])
def test_validate_object_with_recursive_definition(
polymorphic_abspath, polymorphic_dict, internally_dereference_refs,
):
# The goal of this test is to ensure that recursive definitions are properly handled
# even if internally_dereference_refs is enabled.
# Introduce a recursion definition into vendor extensions, this "trick" could be used
# to force bravado-core to recognize models that are defined on #/definitions of
# referenced files or defined on un-referenced files
polymorphic_dict['definitions']['GenericPet']['x-referred-schema'] = [
{'$ref': '#/definitions/{}'.format(definition_key)}
for definition_key, definition in iteritems(polymorphic_dict['definitions'])
if {'$ref': '#/definitions/GenericPet'} in definition.get('allOf', [])
]
polymorphic_spec = Spec.from_dict(
spec_dict=polymorphic_dict,
origin_url=get_url(polymorphic_abspath),
config={'internally_dereference_refs': internally_dereference_refs},
)
dog_dict = {
'name': 'This is a dog name',
'type': 'Dog',
'birth_date': '2018-01-01',
}
try:
validate_object(
swagger_spec=polymorphic_spec,
object_spec=polymorphic_spec.definitions['GenericPet']._model_spec,
value=dog_dict,
)
except RuntimeError: # Not catching RecursionError as it was introduced in Python 3.5+
pytest.fail('Unbounded recursion has been detected while calling validate_object')
def test_validate_object_raises_ValidationError_if_discriminator_key_is_missing(
minimal_swagger_dict,
):
# More context for this test on https://github.com/Yelp/bravado-core/issues/301
minimal_swagger_dict['definitions'] = {
'Model': {
"type": 'object',
"properties": {
"discriminator_field": {"type": 'string'},
},
"discriminator": "discriminator_field",
"required": ["discriminator_field"],
},
}
spec = Spec.from_dict(minimal_swagger_dict)
with pytest.raises(ValidationError) as excinfo:
validate_object(
swagger_spec=spec,
object_spec=minimal_swagger_dict['definitions']['Model'],
value={},
)
assert "'discriminator_field' is a required property" in excinfo.value.message
|
chrome/common/extensions/docs/server2/api_data_source_test.py | kjthegod/chromium | 2,151 | 11074095 | <filename>chrome/common/extensions/docs/server2/api_data_source_test.py
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from api_data_source import APIDataSource
from extensions_paths import CHROME_EXTENSIONS
from servlet import Request
from server_instance import ServerInstance
from test_data.api_data_source.canned_master_fs import CANNED_MASTER_FS_DATA
from test_file_system import TestFileSystem
class APIDataSourceTest(unittest.TestCase):
def setUp(self):
self.server_instance = ServerInstance.ForTest(
TestFileSystem(CANNED_MASTER_FS_DATA, relative_to=CHROME_EXTENSIONS))
def testGet(self):
api_ds = APIDataSource(self.server_instance, Request.ForTest('/'))
jsc_view = api_ds.get('extensions').get('tester')
funcs_arr = [{
'availability': None,
'callback': {
'name': 'callback',
'optional': False,
'parameters': [{
'array': {
'availability': None,
'description': None,
'events': [],
'functions': [],
'id': 'type-results-resultsType',
'is_object': False,
'link': {
'name': 'TypeA',
'ref': 'tester.TypeA',
'text': 'TypeA'
},
'name': 'resultsType',
'properties': []
},
'availability': None,
'description': None,
'functions': [],
'id': 'property-callback-results',
'is_object': False,
'last': True,
'name': 'results',
'optional': None,
'parameters': [],
'parentName': 'callback',
'properties': [],
'returns': None
}],
'simple_type': {
'simple_type': 'function'
}
},
'description': 'Gets stuff.',
'id': 'method-get',
'name': 'get',
'parameters': [{
'availability': None,
'choices': [{
'availability': None,
'description': None,
'events': [],
'functions': [],
'id': 'type-a-string',
'is_object': False,
'name': 'string',
'properties': [],
'simple_type': 'string'
},
{
'array': {
'availability': None,
'description': None,
'events': [],
'functions': [],
'id': 'type-strings-stringsType',
'is_object': False,
'name': 'stringsType',
'properties': [],
'simple_type': 'string'
},
'availability': None,
'description': None,
'events': [],
'functions': [],
'id': 'type-a-strings',
'is_object': False,
'last': True,
'name': 'strings',
'properties': []
}],
'description': 'a param',
'functions': [],
'id': 'property-get-a',
'is_object': False,
'name': 'a',
'optional': None,
'parameters': [],
'parentName': 'get',
'properties': [],
'returns': None
},
{
'asFunction': {
'name': 'callback',
'optional': False,
'parameters': [{
'array': {
'availability': None,
'description': None,
'events': [],
'functions': [],
'id': 'type-results-resultsType',
'is_object': False,
'link': {
'name': 'TypeA',
'ref': 'tester.TypeA',
'text': 'TypeA'
},
'name': 'resultsType',
'properties': []
},
'availability': None,
'description': None,
'functions': [],
'id': 'property-callback-results',
'is_object': False,
'last': True,
'name': 'results',
'optional': None,
'parameters': [],
'parentName': 'callback',
'properties': [],
'returns': None
}],
'simple_type': {
'simple_type': 'function'
}
},
'description': None,
'id': 'property-get-callback',
'isCallback': True,
'last': True,
'name': 'callback',
'optional': False,
'parentName': 'get',
'simple_type': 'function'
}],
'returns': None
}]
self.assertEquals(funcs_arr, jsc_view['functions'])
types_arr = [{
'availability': None,
'description': 'A cool thing.',
'events': [],
'functions': [],
'id': 'type-TypeA',
'is_object': True,
'name': 'TypeA',
'properties': [{
'array': {
'availability': None,
'description': None,
'events': [],
'functions': [],
'id': 'type-b-bType',
'is_object': False,
'link': {
'name': 'TypeA',
'ref': 'tester.TypeA',
'text': 'TypeA'
},
'name': 'bType',
'properties': []
},
'availability': None,
'description': 'List of TypeA.',
'functions': [],
'id': 'property-TypeA-b',
'is_object': False,
'name': 'b',
'optional': True,
'parameters': [],
'parentName': 'TypeA',
'properties': [],
'returns': None
}],
'simple_type': 'object'
}]
self.assertEquals(types_arr, jsc_view['types'])
if __name__ == '__main__':
unittest.main()
|
utils/network/arrange_network_detection_results.py | claraeyoon/FAST | 126 | 11074099 | <filename>utils/network/arrange_network_detection_results.py
import numpy as np
# ---------------------------------------------------INPUTS --------------------------------------------
det_dir = '../../data/network_detection/'
network_file = '7sta_2stathresh_detlist_rank_by_peaksum.txt'
nsta = 7
#det_dir = '/lfs/1/ceyoon/TimeSeries/HectorMine/network_detection/'
#network_file = '7sta_2stathresh_detlist_rank_by_peaksum.txt'
#nsta = 7
#det_dir = '/lfs/1/ceyoon/TimeSeries/ItalyDay/day303/network_detection/'
#network_file = '22sta_2stathresh_detlist_rank_by_peaksum.txt'
#nsta = 22
#det_dir = '/lfs/1/ceyoon/TimeSeries/SaudiMonth/data/network_detection/'
#network_file = '19sta_2stathresh_detlist_rank_by_peaksum.txt'
#nsta = 19
#det_dir = '/lfs/1/ceyoon/TimeSeries/OBSTest/network_detection/'
#network_file = '35sta_2stathresh_detlist_rank_by_peaksum.txt'
#nsta = 35
#det_dir = '/lfs/1/ceyoon/TimeSeries/AllWenchuan/network_detection/'
#network_file = '15sta_2stathresh_detlist_rank_by_peaksum.txt'
#nsta = 15
#det_dir = '/lfs/1/ceyoon/TimeSeries/SaudiFull/network_detection/'
#network_file = '36sta_3stathresh_detlist_rank_by_peaksum.txt'
#nsta = 36
#det_dir = '/lfs/1/ceyoon/TimeSeries/Diablo/network_detection/'
#network_file = '11sta_2stathresh_mindets3_dgapL10_inputoffset15_detlist_rank_by_peaksum.txt'
#nsta = 11
#det_dir = '/lfs/1/ceyoon/TimeSeries/Okmok/network_detection/'
#network_file = '7sta_2stathresh_detlist_rank_by_peaksum.txt'
#nsta = 7
# Output detection time at each station to file (avoid this if there are many stations)
flag_each_station = False
#-------
#det_dir = '/lfs/1/ceyoon/TimeSeries/TanzaniaMonth/network_detection/'
#network_file = '5sta_2stathresh_detlist_rank_by_peaksum.txt'
#nsta = 5
#flag_each_station = True
#-------
# ---------------------------------------------------INPUTS --------------------------------------------
out_file = 'NetworkDetectionTimes_'+network_file
# Read in network detections
data = np.genfromtxt(det_dir+network_file, dtype=str, skip_header=1)
print("Number of network detections: ", len(data))
# Get min and max index (time) for each event over all stations without NaNs
ind_time_min = []
ind_time_max = []
ind_time_diff = []
count_not_nan = []
for iev in range(len(data)):
cur_event = data[iev]
ind_event_time = []
for ista in range(nsta):
if 'nan' != str.lower(cur_event[ista]):
ind_event_time.append(int(cur_event[ista]))
print('event:', iev, ', times: ', ind_event_time)
ind_time_min.append(str(min(ind_event_time)))
ind_time_max.append(str(max(ind_event_time)))
ind_time_diff.append(str(max(ind_event_time)-min(ind_event_time)))
count_not_nan.append(str(len(ind_event_time)))
# Output network detections with min index and max index to file
fout = open(det_dir+out_file, 'w')
for iev in range(len(data)):
fout.write('%12s %12s %12s %12s %12s %12s %12s %12s %12s %12s %12s %12s' % (ind_time_min[iev], ind_time_max[iev], data[iev][nsta], data[iev][nsta+1], data[iev][nsta+2], data[iev][nsta+3], data[iev][nsta+4], data[iev][nsta+5], data[iev][nsta+6], data[iev][nsta+7], count_not_nan[iev], ind_time_diff[iev]))
if (flag_each_station):
for ista in range(nsta):
fout.write('%12s' % data[iev][ista])
fout.write('\n')
fout.close()
|
azure-iot-device/tests/provisioning/shared_client_tests.py | dominicbetts/azure-iot-sdk-python | 366 | 11074104 | <reponame>dominicbetts/azure-iot-sdk-python
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""This module contains tests that are shared between sync/async clients
i.e. tests for things defined in abstract clients"""
import pytest
import logging
import socks
from azure.iot.device.common import auth
from azure.iot.device.common.auth import sastoken as st
from azure.iot.device.provisioning.pipeline import ProvisioningPipelineConfig
from azure.iot.device import ProxyOptions
from azure.iot.device.common.pipeline.config import DEFAULT_KEEPALIVE
logging.basicConfig(level=logging.DEBUG)
fake_provisioning_host = "hogwarts.com"
fake_registration_id = "MyPensieve"
fake_id_scope = "Enchanted0000Ceiling7898"
fake_symmetric_key = "<KEY>"
class SharedProvisioningClientInstantiationTests(object):
@pytest.mark.it(
"Stores the ProvisioningPipeline from the 'pipeline' parameter in the '_pipeline' attribute"
)
def test_sets_provisioning_pipeline(self, client_class, provisioning_pipeline):
client = client_class(provisioning_pipeline)
assert client._pipeline is provisioning_pipeline
@pytest.mark.it(
"Instantiates with the initial value of the '_provisioning_payload' attribute set to None"
)
def test_payload(self, client_class, provisioning_pipeline):
client = client_class(provisioning_pipeline)
assert client._provisioning_payload is None
class SharedProvisioningClientCreateMethodUserOptionTests(object):
@pytest.mark.it(
"Sets the 'websockets' user option parameter on the PipelineConfig, if provided"
)
def test_websockets_option(
self, mocker, client_create_method, create_method_args, mock_pipeline_init
):
client_create_method(*create_method_args, websockets=True)
# Get configuration object
assert mock_pipeline_init.call_count == 1
config = mock_pipeline_init.call_args[0][0]
assert isinstance(config, ProvisioningPipelineConfig)
assert config.websockets
# TODO: Show that input in the wrong format is formatted to the correct one. This test exists
# in the ProvisioningPipelineConfig object already, but we do not currently show that this is felt
# from the API level.
@pytest.mark.it("Sets the 'cipher' user option parameter on the PipelineConfig, if provided")
def test_cipher_option(self, client_create_method, create_method_args, mock_pipeline_init):
cipher = "DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA:ECDHE-ECDSA-AES128-GCM-SHA256"
client_create_method(*create_method_args, cipher=cipher)
# Get configuration object
assert mock_pipeline_init.call_count == 1
config = mock_pipeline_init.call_args[0][0]
assert isinstance(config, ProvisioningPipelineConfig)
assert config.cipher == cipher
@pytest.mark.it("Sets the 'proxy_options' user option parameter on the PipelineConfig")
def test_proxy_options(self, client_create_method, create_method_args, mock_pipeline_init):
proxy_options = ProxyOptions(proxy_type=socks.HTTP, proxy_addr="127.0.0.1", proxy_port=8888)
client_create_method(*create_method_args, proxy_options=proxy_options)
# Get configuration object
assert mock_pipeline_init.call_count == 1
config = mock_pipeline_init.call_args[0][0]
assert isinstance(config, ProvisioningPipelineConfig)
assert config.proxy_options is proxy_options
@pytest.mark.it(
"Sets the 'keep_alive' user option parameter on the PipelineConfig, if provided"
)
def test_keep_alive_options(self, client_create_method, create_method_args, mock_pipeline_init):
keepalive_value = 60
client_create_method(*create_method_args, keep_alive=keepalive_value)
        # Get configuration object
assert mock_pipeline_init.call_count == 1
config = mock_pipeline_init.call_args[0][0]
assert isinstance(config, ProvisioningPipelineConfig)
assert config.keep_alive == keepalive_value
@pytest.mark.it("Raises a TypeError if an invalid user option parameter is provided")
def test_invalid_option(
self, mocker, client_create_method, create_method_args, mock_pipeline_init
):
with pytest.raises(TypeError):
client_create_method(*create_method_args, invalid_option="some_value")
@pytest.mark.it("Sets default user options if none are provided")
def test_default_options(
self, mocker, client_create_method, create_method_args, mock_pipeline_init
):
client_create_method(*create_method_args)
# Pipeline uses a ProvisioningPipelineConfig
assert mock_pipeline_init.call_count == 1
config = mock_pipeline_init.call_args[0][0]
assert isinstance(config, ProvisioningPipelineConfig)
# ProvisioningPipelineConfig has default options set that were not user-specified
assert config.websockets is False
assert config.cipher == ""
assert config.proxy_options is None
assert config.keep_alive == DEFAULT_KEEPALIVE
@pytest.mark.parametrize(
"registration_id",
[
pytest.param(None, id="No Registration Id provided"),
pytest.param(" ", id="Blank Registration Id provided"),
pytest.param("", id="Empty Registration Id provided"),
],
)
def test_invalid_registration_id(self, client_create_method, registration_id):
with pytest.raises(ValueError):
client_create_method(
fake_provisioning_host, registration_id, fake_id_scope, fake_symmetric_key
)
@pytest.mark.usefixtures("mock_pipeline_init")
class SharedProvisioningClientCreateFromSymmetricKeyTests(
SharedProvisioningClientCreateMethodUserOptionTests
):
@pytest.fixture
def client_create_method(self, client_class):
return client_class.create_from_symmetric_key
@pytest.fixture
def create_method_args(self):
return [fake_provisioning_host, fake_registration_id, fake_id_scope, fake_symmetric_key]
@pytest.mark.it(
"Creates a SasToken that uses a SymmetricKeySigningMechanism, from the values provided in paramaters"
)
def test_sastoken(self, mocker, client_class):
sksm_mock = mocker.patch.object(auth, "SymmetricKeySigningMechanism")
sastoken_mock = mocker.patch.object(st, "RenewableSasToken")
expected_uri = "{id_scope}/registrations/{registration_id}".format(
id_scope=fake_id_scope, registration_id=fake_registration_id
)
custom_ttl = 1000
client_class.create_from_symmetric_key(
provisioning_host=fake_provisioning_host,
registration_id=fake_registration_id,
id_scope=fake_id_scope,
symmetric_key=fake_symmetric_key,
sastoken_ttl=custom_ttl,
)
# SymmetricKeySigningMechanism created using the provided symmetric key
assert sksm_mock.call_count == 1
assert sksm_mock.call_args == mocker.call(key=fake_symmetric_key)
# SasToken created with the SymmetricKeySigningMechanism, the expected URI, and the custom ttl
assert sastoken_mock.call_count == 1
assert sastoken_mock.call_args == mocker.call(
expected_uri, sksm_mock.return_value, ttl=custom_ttl
)
@pytest.mark.it(
"Uses 3600 seconds (1 hour) as the default SasToken TTL if no custom TTL is provided"
)
def test_sastoken_default(self, mocker, client_class):
sksm_mock = mocker.patch.object(auth, "SymmetricKeySigningMechanism")
sastoken_mock = mocker.patch.object(st, "RenewableSasToken")
expected_uri = "{id_scope}/registrations/{registration_id}".format(
id_scope=fake_id_scope, registration_id=fake_registration_id
)
client_class.create_from_symmetric_key(
provisioning_host=fake_provisioning_host,
registration_id=fake_registration_id,
id_scope=fake_id_scope,
symmetric_key=fake_symmetric_key,
)
# SymmetricKeySigningMechanism created using the provided symmetric key
assert sksm_mock.call_count == 1
assert sksm_mock.call_args == mocker.call(key=fake_symmetric_key)
# SasToken created with the SymmetricKeySigningMechanism, the expected URI, and the default ttl
assert sastoken_mock.call_count == 1
assert sastoken_mock.call_args == mocker.call(
expected_uri, sksm_mock.return_value, ttl=3600
)
@pytest.mark.it(
"Creates an MQTT pipeline with a ProvisioningPipelineConfig object containing the SasToken and values provided in the parameters"
)
def test_pipeline_config(self, mocker, client_class, mock_pipeline_init):
sastoken_mock = mocker.patch.object(st, "RenewableSasToken")
client_class.create_from_symmetric_key(
provisioning_host=fake_provisioning_host,
registration_id=fake_registration_id,
id_scope=fake_id_scope,
symmetric_key=fake_symmetric_key,
)
# Verify pipeline was created with a ProvisioningPipelineConfig
assert mock_pipeline_init.call_count == 1
assert isinstance(mock_pipeline_init.call_args[0][0], ProvisioningPipelineConfig)
# Verify the ProvisioningPipelineConfig is constructed as expected
config = mock_pipeline_init.call_args[0][0]
assert config.hostname == fake_provisioning_host
assert config.gateway_hostname is None
assert config.registration_id == fake_registration_id
assert config.id_scope == fake_id_scope
assert config.sastoken is sastoken_mock.return_value
@pytest.mark.it(
"Returns an instance of a ProvisioningDeviceClient using the created MQTT pipeline"
)
def test_client_returned(self, mocker, client_class, mock_pipeline_init):
client = client_class.create_from_symmetric_key(
provisioning_host=fake_provisioning_host,
registration_id=fake_registration_id,
id_scope=fake_id_scope,
symmetric_key=fake_symmetric_key,
)
assert isinstance(client, client_class)
assert client._pipeline is mock_pipeline_init.return_value
@pytest.mark.it("Raises ValueError if a SasToken creation results in failure")
def test_sastoken_failure(self, mocker, client_class):
sastoken_mock = mocker.patch.object(st, "RenewableSasToken")
token_err = st.SasTokenError("Some SasToken failure")
sastoken_mock.side_effect = token_err
with pytest.raises(ValueError) as e_info:
client_class.create_from_symmetric_key(
provisioning_host=fake_provisioning_host,
registration_id=fake_registration_id,
id_scope=fake_id_scope,
symmetric_key=fake_symmetric_key,
)
assert e_info.value.__cause__ is token_err
@pytest.mark.usefixtures("mock_pipeline_init")
class SharedProvisioningClientCreateFromX509CertificateTests(
SharedProvisioningClientCreateMethodUserOptionTests
):
@pytest.fixture
def client_create_method(self, client_class):
return client_class.create_from_x509_certificate
@pytest.fixture
def create_method_args(self, x509):
return [fake_provisioning_host, fake_registration_id, fake_id_scope, x509]
@pytest.mark.it(
"Creats MQTT pipeline with a ProvisioningPipelineConfig object containing the X509 and other values provided in parameters"
)
def test_pipeline_config(self, mocker, client_class, x509, mock_pipeline_init):
client_class.create_from_x509_certificate(
provisioning_host=fake_provisioning_host,
registration_id=fake_registration_id,
id_scope=fake_id_scope,
x509=x509,
)
# Verify pipeline created with a ProvisioningPipelineConfig
assert mock_pipeline_init.call_count == 1
assert isinstance(mock_pipeline_init.call_args[0][0], ProvisioningPipelineConfig)
# Verify the ProvisioningPipelineConfig is constructed as expected
config = mock_pipeline_init.call_args[0][0]
assert config.hostname == fake_provisioning_host
assert config.gateway_hostname is None
assert config.registration_id == fake_registration_id
assert config.id_scope == fake_id_scope
assert config.x509 is x509
@pytest.mark.it(
"Returns an instance of a ProvisioningDeviceClient using the created MQTT pipeline"
)
def test_client_returned(self, mocker, client_class, x509, mock_pipeline_init):
client = client_class.create_from_x509_certificate(
provisioning_host=fake_provisioning_host,
registration_id=fake_registration_id,
id_scope=fake_id_scope,
x509=x509,
)
assert isinstance(client, client_class)
assert client._pipeline is mock_pipeline_init.return_value
@pytest.mark.it("Raises a TypeError if the 'sastoken_ttl' kwarg is supplied by the user")
def test_sastoken_ttl(self, client_class, x509):
with pytest.raises(TypeError):
client_class.create_from_x509_certificate(
provisioning_host=fake_provisioning_host,
registration_id=fake_registration_id,
id_scope=fake_id_scope,
x509=x509,
sastoken_ttl=1000,
)
|
dfvfs/vfs/ext_file_entry.py | dfjxs/dfvfs | 176 | 11074113 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
"""The EXT file entry implementation."""
from dfdatetime import posix_time as dfdatetime_posix_time
from dfvfs.lib import definitions
from dfvfs.lib import errors
from dfvfs.path import ext_path_spec
from dfvfs.vfs import attribute
from dfvfs.vfs import ext_attribute
from dfvfs.vfs import file_entry
class EXTDirectory(file_entry.Directory):
"""File system directory that uses pyfsext."""
def _EntriesGenerator(self):
"""Retrieves directory entries.
    Since a directory can contain a vast number of entries, using
    a generator is more memory efficient.
Yields:
EXTPathSpec: EXT path specification.
"""
try:
fsext_file_entry = self._file_system.GetEXTFileEntryByPathSpec(
self.path_spec)
except errors.PathSpecError:
return
location = getattr(self.path_spec, 'location', None)
for fsext_sub_file_entry in fsext_file_entry.sub_file_entries:
directory_entry = fsext_sub_file_entry.name
if not location or location == self._file_system.PATH_SEPARATOR:
directory_entry = self._file_system.JoinPath([directory_entry])
else:
directory_entry = self._file_system.JoinPath([
location, directory_entry])
yield ext_path_spec.EXTPathSpec(
inode=fsext_sub_file_entry.inode_number, location=directory_entry,
parent=self.path_spec.parent)
class EXTFileEntry(file_entry.FileEntry):
"""File system file entry that uses pyfsext."""
TYPE_INDICATOR = definitions.TYPE_INDICATOR_EXT
# Mappings of EXT file types to dfVFS file entry types.
_ENTRY_TYPES = {
0x1000: definitions.FILE_ENTRY_TYPE_PIPE,
0x2000: definitions.FILE_ENTRY_TYPE_DEVICE,
0x4000: definitions.FILE_ENTRY_TYPE_DIRECTORY,
0x6000: definitions.FILE_ENTRY_TYPE_DEVICE,
0x8000: definitions.FILE_ENTRY_TYPE_FILE,
0xa000: definitions.FILE_ENTRY_TYPE_LINK,
0xc000: definitions.FILE_ENTRY_TYPE_SOCKET}
_NANOSECONDS_PER_SECOND = 1000000000
def __init__(
self, resolver_context, file_system, path_spec, fsext_file_entry=None,
is_root=False, is_virtual=False):
"""Initializes a file entry.
Args:
resolver_context (Context): resolver context.
file_system (FileSystem): file system.
path_spec (PathSpec): path specification.
fsext_file_entry (Optional[pyfsext.file_entry]): EXT file entry.
is_root (Optional[bool]): True if the file entry is the root file entry
of the corresponding file system.
is_virtual (Optional[bool]): True if the file entry is a virtual file
entry emulated by the corresponding file system.
Raises:
BackEndError: if the pyfsext file entry is missing.
"""
if not fsext_file_entry:
fsext_file_entry = file_system.GetEXTFileEntryByPathSpec(path_spec)
if not fsext_file_entry:
raise errors.BackEndError('Missing pyfsext file entry.')
if is_root:
file_entry_name = ''
else:
file_entry_name = fsext_file_entry.name
# Use the path specification location to determine the file entry name
# if the file entry was retrieved by inode.
if file_entry_name is None:
location = getattr(path_spec, 'location', None)
if location:
location_segments = file_system.SplitPath(location)
if location_segments:
file_entry_name = location_segments[-1]
super(EXTFileEntry, self).__init__(
resolver_context, file_system, path_spec, is_root=is_root,
is_virtual=is_virtual)
self._creation_time = fsext_file_entry.get_creation_time_as_integer()
self._fsext_file_entry = fsext_file_entry
self._name = file_entry_name
self.entry_type = self._ENTRY_TYPES.get(
fsext_file_entry.file_mode & 0xf000, None)
def _GetAttributes(self):
"""Retrieves the attributes.
Returns:
list[Attribute]: attributes.
"""
if self._attributes is None:
self._attributes = []
for fsext_extended_attribute in (
self._fsext_file_entry.extended_attributes):
extended_attribute = ext_attribute.EXTExtendedAttribute(
fsext_extended_attribute)
self._attributes.append(extended_attribute)
return self._attributes
def _GetDirectory(self):
"""Retrieves a directory.
Returns:
EXTDirectory: directory or None if not available.
"""
if self.entry_type != definitions.FILE_ENTRY_TYPE_DIRECTORY:
return None
return EXTDirectory(self._file_system, self.path_spec)
def _GetLink(self):
"""Retrieves the link.
Returns:
str: path of the linked file.
"""
if self._link is None:
self._link = self._fsext_file_entry.symbolic_link_target
if self._link and self._link[0] != self._file_system.PATH_SEPARATOR:
# TODO: make link absolute.
self._link = '/{0:s}'.format(self._link)
return self._link
def _GetStat(self):
"""Retrieves information about the file entry.
Returns:
VFSStat: a stat object.
"""
stat_object = super(EXTFileEntry, self)._GetStat()
# File data stat information.
stat_object.size = self._fsext_file_entry.size
# Ownership and permissions stat information.
stat_object.mode = self._fsext_file_entry.file_mode & 0x0fff
stat_object.uid = self._fsext_file_entry.owner_identifier
stat_object.gid = self._fsext_file_entry.group_identifier
# File entry type stat information.
stat_object.type = self.entry_type
# Other stat information.
stat_object.ino = self._fsext_file_entry.inode_number
stat_object.fs_type = 'EXT'
stat_object.is_allocated = True
return stat_object
def _GetStatAttribute(self):
"""Retrieves a stat attribute.
Returns:
StatAttribute: a stat attribute or None if not available.
"""
stat_attribute = attribute.StatAttribute()
stat_attribute.group_identifier = self._fsext_file_entry.group_identifier
stat_attribute.inode_number = self._fsext_file_entry.inode_number
stat_attribute.mode = self._fsext_file_entry.file_mode
stat_attribute.number_of_links = self._fsext_file_entry.number_of_links
stat_attribute.owner_identifier = self._fsext_file_entry.owner_identifier
stat_attribute.size = self._fsext_file_entry.size
stat_attribute.type = self.entry_type
return stat_attribute
def _GetSubFileEntries(self):
"""Retrieves a sub file entries generator.
Yields:
EXTFileEntry: a sub file entry.
"""
if self._directory is None:
self._directory = self._GetDirectory()
if self._directory:
for path_spec in self._directory.entries:
yield EXTFileEntry(
self._resolver_context, self._file_system, path_spec)
@property
def access_time(self):
"""dfdatetime.DateTimeValues: access time or None if not available."""
timestamp = self._fsext_file_entry.get_access_time_as_integer()
# If creation time is not present (None) the timestamp precision is in
# seconds.
if self._creation_time is None:
timestamp, _ = divmod(timestamp, self._NANOSECONDS_PER_SECOND)
return dfdatetime_posix_time.PosixTime(timestamp=timestamp)
return dfdatetime_posix_time.PosixTimeInNanoseconds(timestamp=timestamp)
@property
def change_time(self):
"""dfdatetime.DateTimeValues: change time or None if not available."""
timestamp = self._fsext_file_entry.get_inode_change_time_as_integer()
# If creation time is not present (None) the timestamp precision is in
# seconds.
if self._creation_time is None:
timestamp, _ = divmod(timestamp, self._NANOSECONDS_PER_SECOND)
return dfdatetime_posix_time.PosixTime(timestamp=timestamp)
return dfdatetime_posix_time.PosixTimeInNanoseconds(timestamp=timestamp)
@property
def creation_time(self):
"""dfdatetime.DateTimeValues: creation time or None if not available."""
# Creation time can be None if not present and 0 if not set.
if not self._creation_time:
return None
return dfdatetime_posix_time.PosixTimeInNanoseconds(
timestamp=self._creation_time)
@property
def deletion_time(self):
"""dfdatetime.DateTimeValues: deletion time or None if not available."""
timestamp = self._fsext_file_entry.get_deletion_time_as_integer()
# Deletion time can be 0 if not set.
if not timestamp:
return None
return dfdatetime_posix_time.PosixTime(timestamp=timestamp)
@property
def name(self):
"""str: name of the file entry, which does not include the full path."""
return self._name
@property
def modification_time(self):
"""dfdatetime.DateTimeValues: modification time or None if not available."""
timestamp = self._fsext_file_entry.get_modification_time_as_integer()
# If creation time is not present (None) the timestamp precision is in
# seconds.
if self._creation_time is None:
timestamp, _ = divmod(timestamp, self._NANOSECONDS_PER_SECOND)
return dfdatetime_posix_time.PosixTime(timestamp=timestamp)
return dfdatetime_posix_time.PosixTimeInNanoseconds(timestamp=timestamp)
@property
def size(self):
"""int: size of the file entry in bytes or None if not available."""
return self._fsext_file_entry.size
def GetEXTFileEntry(self):
"""Retrieves the EXT file entry.
Returns:
pyfsext.file_entry: EXT file entry.
"""
return self._fsext_file_entry
def GetLinkedFileEntry(self):
"""Retrieves the linked file entry, e.g. for a symbolic link.
Returns:
EXTFileEntry: linked file entry or None if not available.
"""
link = self._GetLink()
if not link:
return None
parent_path_spec = getattr(self.path_spec, 'parent', None)
path_spec = ext_path_spec.EXTPathSpec(
location=link, parent=parent_path_spec)
is_root = bool(link == self._file_system.LOCATION_ROOT)
return EXTFileEntry(
self._resolver_context, self._file_system, path_spec, is_root=is_root)
def GetParentFileEntry(self):
"""Retrieves the parent file entry.
Returns:
EXTFileEntry: parent file entry or None if not available.
"""
parent_location = None
location = getattr(self.path_spec, 'location', None)
if location is not None:
parent_location = self._file_system.DirnamePath(location)
if parent_location == '':
parent_location = self._file_system.PATH_SEPARATOR
parent_path_spec = getattr(self.path_spec, 'parent', None)
path_spec = ext_path_spec.EXTPathSpec(
location=parent_location, parent=parent_path_spec)
is_root = bool(parent_location == self._file_system.LOCATION_ROOT)
return EXTFileEntry(
self._resolver_context, self._file_system, path_spec, is_root=is_root)
|
var/spack/repos/builtin/packages/py-pyjnius/package.py | kkauder/spack | 2,360 | 11074132 | <filename>var/spack/repos/builtin/packages/py-pyjnius/package.py
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPyjnius(PythonPackage):
"""Pyjnius is a Python library for accessing Java classes."""
homepage = "https://pyjnius.readthedocs.io/en/stable"
pypi = "pyjnius/pyjnius-1.3.0.0.tar.gz"
version('1.3.0.0', sha256='d20845e75a2d18224e661d0e2bc2ce9141f17472e685cd6579847b0a7b5da6ad')
depends_on('py-setuptools', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-cython', type=('build', 'run'))
depends_on('java', type=('build', 'run'))
|
spacy_lookup/__init__.py | mpuig/spacy-lookup | 243 | 11074138 | <reponame>mpuig/spacy-lookup
# coding: utf8
from __future__ import unicode_literals
from flashtext import KeywordProcessor
from spacy.tokens import Doc, Span, Token
from .about import __version__
class Entity(object):
name = 'entity'
def __init__(self, keywords_list=[], keywords_dict={}, keywords_file=None,
label='', case_sensitive=False,
attrs=('has_entities', 'is_entity', 'entity_desc', 'entities', 'canonical')):
"""Initialise the pipeline component.
"""
self._has_entities, self._is_entity, self._entity_desc, self._entities, self.canonical = attrs
# Set up the KeywordProcessor
self.keyword_processor = KeywordProcessor(case_sensitive=case_sensitive)
self.keyword_processor.add_keywords_from_list(keywords_list)
self.keyword_processor.add_keywords_from_dict(keywords_dict)
if keywords_file:
self.keyword_processor.add_keyword_from_file(keywords_file)
self.label = label
# Register attribute on the Doc and Span
Doc.set_extension(self._has_entities, getter=self.has_entities, force=True)
Doc.set_extension(self._entities, getter=self.iter_entities, force=True)
Span.set_extension(self._has_entities, getter=self.has_entities, force=True)
Span.set_extension(self._entities, getter=self.iter_entities, force=True)
# Register attribute on the Token.
Token.set_extension(self._is_entity, default=False, force=True)
Token.set_extension(self._entity_desc, getter=self.get_entity_desc, force=True)
Token.set_extension(self.canonical, default=None, force=True)
def __call__(self, doc):
"""Apply the pipeline component on a Doc object and modify it if matches
are found. Return the Doc, so it can be processed by the next component
in the pipeline, if available.
"""
matches = self.keyword_processor.extract_keywords(doc.text, span_info=True)
if len(matches)>0:
entities = [ent.text for ent in doc.ents]
spans = [] # keep spans here to merge them later
for canonical, start, end in matches:
# Generate Span representing the entity & set label
# Using doc.char_span() instead of Span() because the keyword processor returns
# index values based on character positions, not words
entity = doc.char_span(start, end, label=self.label)
if entity and entity.text not in entities:
spans.append(entity)
for token in entity: # set values of token attributes
token._.set(self._is_entity, True)
token._.set('canonical', canonical)
# Overwrite doc.ents and add entity – be careful not to replace!
doc.ents = list(doc.ents) + spans
for span in spans:
# Iterate over all spans and merge them into one token. This is done
# after setting the entities – otherwise, it would cause mismatched
# indices!
span.merge()
return doc
def has_entities(self, tokens):
return any(token._.get(self._is_entity) for token in tokens)
def iter_entities(self, tokens):
return [(t.text, i, t._.get(self.canonical))
for i, t in enumerate(tokens)
if t._.get(self._is_entity)]
def get_entity_desc(self, token):
return token.text
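# Minimal usage sketch (added for illustration, not part of the original module).
# It assumes spaCy 2.x (matching the Span.merge() API used above) and that the
# 'en_core_web_sm' model is installed; the keywords and label are only examples.
if __name__ == '__main__':
    import spacy
    nlp = spacy.load('en_core_web_sm')
    entity = Entity(keywords_list=['python', 'java platform'], label='TECH')
    nlp.add_pipe(entity, last=True)
    doc = nlp("I am a product manager for a java platform and python.")
    print(doc._.has_entities)  # True when any token was matched as a keyword
    print(doc._.entities)      # [(text, token_index, canonical), ...]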
|
docs/examples/use_cases/paddle/ssd/train.py | cyyever/DALI | 3,967 | 11074145 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2017-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import shutil
import time
import numpy as np
from paddle import fluid
import paddle
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.types as types
import nvidia.dali.fn as fn
from nvidia.dali.plugin.paddle import DALIGenericIterator, LastBatchPolicy
from ssd import SSD
from utils import load_weights
PRETRAIN_WEIGHTS = 'https://paddle-imagenet-models-name.bj.bcebos.com/VGG16_caffe_pretrained.tar'
def create_coco_pipeline(file_root,
annotations_file,
batch_size=1,
device_id=0,
num_threads=4,
local_rank=0,
world_size=1):
pipeline = Pipeline(batch_size, num_threads,
local_rank, seed=42 + device_id)
with pipeline:
images, bboxes, labels = fn.readers.coco(file_root=file_root,
annotations_file=annotations_file,
skip_empty=True,
shard_id=local_rank,
num_shards=world_size,
ratio=True,
ltrb=True,
random_shuffle=False,
shuffle_after_epoch=True,
name="Reader")
crop_begin, crop_size, bboxes, labels = fn.random_bbox_crop(bboxes, labels,
device="cpu",
aspect_ratio=[0.5, 2.0],
thresholds=[0, 0.1, 0.3, 0.5, 0.7, 0.9],
scaling=[0.3, 1.0],
bbox_layout="xyXY",
allow_no_crop=True,
num_attempts=50)
images = fn.decoders.image_slice(images, crop_begin, crop_size, device="mixed", output_type=types.RGB)
flip_coin = fn.random.coin_flip(probability=0.5)
images = fn.resize(images,
resize_x=300,
resize_y=300,
min_filter=types.DALIInterpType.INTERP_TRIANGULAR)
# use float to avoid clipping and quantizing the intermediate result
images = fn.hsv(images, dtype=types.FLOAT, hue=fn.random.uniform(range=[-0.5, 0.5]),
saturation=fn.random.uniform(range=[0.5, 1.5]))
images = fn.brightness_contrast(images,
contrast_center = 128, # input is in float, but in 0..255 range
dtype = types.UINT8,
brightness = fn.random.uniform(range=[0.875, 1.125]),
contrast = fn.random.uniform(range=[0.5, 1.5]))
bboxes = fn.bb_flip(bboxes, ltrb=True, horizontal=flip_coin)
images = fn.crop_mirror_normalize(images,
mean=[104., 117., 123.],
std=[1., 1., 1.],
mirror=flip_coin,
dtype=types.FLOAT,
output_layout="CHW",
pad_output=False)
pipeline.set_outputs(images, bboxes, labels)
return pipeline
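# Illustrative sketch (added note, not part of the original script): the pipeline can be
# built and run on its own before wiring it into DALIGenericIterator, e.g.
#   pipe = create_coco_pipeline('/data/coco/train2017',
#                               '/data/coco/annotations/instances_train2017.json',
#                               batch_size=8, device_id=0)
#   pipe.build()
#   images, bboxes, labels = pipe.run()
# where the COCO paths are placeholders for a local dataset layout.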
class AverageMeter(object):
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def build():
model = SSD()
image = fluid.layers.data(
name='image', shape=[3, 300, 300], dtype='float32')
gt_box = fluid.layers.data(
name='gt_box', shape=[4], dtype='float32', lod_level=1)
gt_label = fluid.layers.data(
name='gt_label', shape=[1], dtype='int32', lod_level=1)
return model(image, gt_box, gt_label)
def main():
places = []
for p in fluid.framework.cuda_places():
place = fluid.core.Place()
place.set_place(p)
places.append(place)
file_root = os.path.join(FLAGS.data, 'train2017')
annotations_file = os.path.join(
FLAGS.data, 'annotations/instances_train2017.json')
world_size = len(places)
pipelines = [
create_coco_pipeline(
file_root, annotations_file, FLAGS.batch_size, p.gpu_device_id(),
FLAGS.num_threads, local_rank=idx, world_size=world_size)
for idx, p in enumerate(places)]
train_loader = DALIGenericIterator(
pipelines, ['image', ('gt_box', 1), ('gt_label', 1)],
reader_name="Reader", last_batch_policy=LastBatchPolicy.PARTIAL, auto_reset=True, dynamic_shape=True)
FLAGS.whole_batch_size = FLAGS.batch_size * world_size
total_steps = 400000
if FLAGS.check_loss_steps > 0:
total_steps = FLAGS.check_loss_steps
milestones = [280000, 360000]
values = [FLAGS.lr * (0.1**i) for i in range(len(milestones) + 1)]
exe = fluid.Executor(fluid.CUDAPlace(0))
startup_prog = fluid.Program()
train_prog = fluid.Program()
with fluid.program_guard(train_prog, startup_prog):
with fluid.unique_name.guard():
train_fetch_list = build()
learning_rate = fluid.layers.piecewise_decay(
boundaries=milestones, values=values)
learning_rate = fluid.layers.linear_lr_warmup(
learning_rate=learning_rate,
warmup_steps=500,
start_lr=FLAGS.lr / 3,
end_lr=FLAGS.lr)
decay = FLAGS.weight_decay
optimizer = fluid.optimizer.Momentum(
momentum=FLAGS.momentum,
learning_rate=learning_rate,
regularization=fluid.regularizer.L2Decay(decay))
avg_loss = train_fetch_list[0]
optimizer.minimize(avg_loss)
exe.run(startup_prog)
compiled_train_prog = fluid.CompiledProgram(train_prog).with_data_parallel(
loss_name=avg_loss.name)
load_weights(exe, train_prog, PRETRAIN_WEIGHTS)
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
end = time.time()
def forever():
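        # Cycle the DALI iterator indefinitely; auto_reset=True re-arms the loader
        # after each pass, so StopIteration is swallowed and iteration resumes.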
while True:
try:
yield next(train_loader)
except StopIteration:
pass
for idx, batch in enumerate(forever()):
if idx > total_steps:
break
data_time.update(time.time() - end)
fetches = exe.run(
compiled_train_prog, feed=batch, fetch_list=train_fetch_list)
loss = np.mean(fetches[0])
losses.update(loss, FLAGS.whole_batch_size)
if FLAGS.check_loss_steps > 0:
if idx == 0:
loss_start = loss
else:
loss_end = loss
if idx % FLAGS.print_freq == 0 and idx > 1:
print('Epoch: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Speed {2:.3f} ({3:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
idx, total_steps,
FLAGS.whole_batch_size / batch_time.val,
FLAGS.whole_batch_size / batch_time.avg,
batch_time=batch_time,
data_time=data_time, loss=losses))
if idx % FLAGS.ckpt_freq == 0 and idx > 1:
ckpt_path = os.path.join('checkpoint', "{:02d}".format(idx))
if os.path.isdir(ckpt_path):
shutil.rmtree(ckpt_path)
print('Save model to {}.'.format(ckpt_path))
fluid.io.save_persistables(exe, ckpt_path, train_prog)
batch_time.update(time.time() - end)
end = time.time()
if FLAGS.check_loss_steps > 0:
assert loss_start > loss_end, \
'loss should decrease after training for {} steps'.format(
FLAGS.check_loss_steps)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Paddle Single Shot MultiBox Detector Training')
parser.add_argument('data', metavar='DIR', help='path to dataset')
parser.add_argument('-j', '--num_threads', default=4, type=int,
metavar='N', help='number of threads (default: 4)')
parser.add_argument('-b', '--batch-size', default=8, type=int,
metavar='N', help='mini-batch size (default: 8)')
parser.add_argument('--lr', '--learning-rate', default=0.001, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=5e-4, type=float,
                        metavar='W', help='weight decay (default: 5e-4)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--ckpt-freq', '-c', default=5000, type=int,
metavar='N',
help='checkpoint frequency (default: 5000)')
parser.add_argument('--check-loss-steps', '-t', default=-1, type=int,
metavar='N', help='check N steps for loss convergence')
FLAGS = parser.parse_args()
assert FLAGS.data, "error: must provide data path"
# In PaddlePaddle 2.x, we turn on dynamic graph mode by default, and 'data()' is only supported in static graph mode.
# So if you want to use this api, please call 'paddle.enable_static()' before this api to enter static graph mode.
paddle.enable_static()
main()
|
tests/python/twitter/common/dirutil/size_test.py | zhouyijiaren/commons | 1,143 | 11074158 | <reponame>zhouyijiaren/commons<filename>tests/python/twitter/common/dirutil/size_test.py
# ==================================================================================================
# Copyright 2012 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import os
import stat
from twitter.common.contextutil import temporary_file, temporary_dir
from twitter.common.dirutil import safe_size, safe_bsize, du
def create_files(tempdir, *filenames):
for filename in filenames:
with open(os.path.join(tempdir, filename), 'w') as fp:
fp.close()
def test_size():
with temporary_dir() as td:
create_files(td, 'file1.txt')
file1 = os.path.join(td, 'file1.txt')
assert safe_size(file1) == 0
with open(file1, 'w') as fp:
fp.write('!' * 101)
assert safe_size(file1) == 101
f1stat = os.stat(file1)
assert safe_bsize(file1) == 512 * f1stat.st_blocks
assert du(td) == safe_bsize(file1)
file2 = os.path.join(td, 'file2.txt')
os.symlink('file1.txt', file2)
assert safe_size(file2) == len('file1.txt')
assert safe_bsize(file2) == len('file1.txt')
assert du(td) == safe_bsize(file1) + len('file1.txt')
assert safe_size(os.path.join(td, 'file3.txt')) == 0
assert safe_bsize(os.path.join(td, 'file3.txt')) == 0
errors = []
def on_error(path, err):
errors.append(path)
safe_size(os.path.join(td, 'file3.txt'), on_error=on_error)
assert errors == [os.path.join(td, 'file3.txt')]
|
samples/invoice/remind.py | Hey-Marvelous/PayPal-Python-SDK | 653 | 11074186 | from paypalrestsdk import Invoice
import logging
logging.basicConfig(level=logging.INFO)
invoice = Invoice.find("INV2-9CAH-K5G7-2JPL-G4B4")
options = {
"subject": "Past due",
"note": "Please pay soon",
"send_to_merchant": True
}
if invoice.remind(options): # return True or False
print("Invoice[%s] remind successfully" % (invoice.id))
else:
print(invoice.error)
|
test/mitmproxy/proxy/conftest.py | KarlParkinson/mitmproxy | 24,939 | 11074208 | import os
import pytest
from hypothesis import settings
from mitmproxy import connection, options
from mitmproxy.addons.proxyserver import Proxyserver
from mitmproxy.addons.termlog import TermLog
from mitmproxy.proxy import context
@pytest.fixture
def tctx() -> context.Context:
opts = options.Options()
Proxyserver().load(opts)
TermLog().load(opts)
return context.Context(
connection.Client(
("client", 1234),
("127.0.0.1", 8080),
1605699329
),
opts
)
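# Two hypothesis profiles are registered below; pick one at runtime via the
# HYPOTHESIS_PROFILE environment variable (defaults to "fast").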
settings.register_profile("fast", max_examples=10)
settings.register_profile("deep", max_examples=100_000, deadline=None)
settings.load_profile(os.getenv("HYPOTHESIS_PROFILE", "fast"))
|
Python-3/ordered_dict_example.py | ghiloufibelgacem/jornaldev | 1,139 | 11074215 | from collections import OrderedDict
# creating a simple dict
my_dict = {'kiwi': 4, 'apple': 5, 'cat': 3}
# creating empty ordered dict
ordered_dict = OrderedDict()
print(ordered_dict)
# creating ordered dict from dict
ordered_dict = OrderedDict(my_dict)
print(ordered_dict)
# adding elements to dict
ordered_dict['dog'] = 3
# replacing a dict key value
ordered_dict['kiwi'] = 10
print(ordered_dict)
# removing and adding a value
ordered_dict.pop('kiwi')
print(ordered_dict)
ordered_dict['kiwi'] = 4
print(ordered_dict)
# moving apple to end and dog to start
ordered_dict.move_to_end('apple')
ordered_dict.move_to_end('dog', False)
print(ordered_dict)
# pop last item
item = ordered_dict.popitem(True)
print(item)
print(ordered_dict)
# reversed iteration
for item in reversed(ordered_dict):
print(item)
# equality tests
d1 = {'a': 'A', 'b': 'B'}
d2 = {'b': 'B', 'a': 'A'}
# From python 3.6 onwards, order is retained for keyword arguments passed to the OrderedDict constructor
# Reference: https://www.python.org/dev/peps/pep-0468/
od1 = OrderedDict({'a': 'A', 'b': 'B'})
od2 = OrderedDict({'b': 'B', 'a': 'A'})
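# Added note: the three comparisons below are expected to print True, False, True.
# Plain dicts ignore insertion order, OrderedDict-vs-OrderedDict equality is
# order-sensitive, and OrderedDict-vs-dict falls back to order-insensitive equality.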
print(d1 == d2)
print(od1 == od2)
print(d1 == od1) |
yt/fields/xray_emission_fields.py | Xarthisius/yt | 360 | 11074236 | <gh_stars>100-1000
import os
import numpy as np
from yt.config import ytcfg
from yt.fields.derived_field import DerivedField
from yt.funcs import mylog, only_on_root, parse_h5_attr
from yt.units.yt_array import YTArray, YTQuantity
from yt.utilities.cosmology import Cosmology
from yt.utilities.exceptions import YTException, YTFieldNotFound
from yt.utilities.linear_interpolators import (
BilinearFieldInterpolator,
UnilinearFieldInterpolator,
)
from yt.utilities.on_demand_imports import _h5py as h5py
data_version = {"cloudy": 2, "apec": 3}
data_url = "http://yt-project.org/data"
def _get_data_file(table_type, data_dir=None):
data_file = "%s_emissivity_v%d.h5" % (table_type, data_version[table_type])
if data_dir is None:
supp_data_dir = ytcfg.get("yt", "supp_data_dir")
data_dir = supp_data_dir if os.path.exists(supp_data_dir) else "."
data_path = os.path.join(data_dir, data_file)
if not os.path.exists(data_path):
msg = "Failed to find emissivity data file {}! Please download from {}".format(
data_file,
data_url,
)
mylog.error(msg)
raise OSError(msg)
return data_path
class EnergyBoundsException(YTException):
def __init__(self, lower, upper):
self.lower = lower
self.upper = upper
def __str__(self):
return f"Energy bounds are {self.lower:e} to {self.upper:e} keV."
class ObsoleteDataException(YTException):
def __init__(self, table_type):
data_file = "%s_emissivity_v%d.h5" % (table_type, data_version[table_type])
self.msg = "X-ray emissivity data is out of date.\n"
self.msg += f"Download the latest data from {data_url}/{data_file}."
def __str__(self):
return self.msg
class XrayEmissivityIntegrator:
r"""Class for making X-ray emissivity fields. Uses hdf5 data tables
generated from Cloudy and AtomDB/APEC.
Initialize an XrayEmissivityIntegrator object.
Parameters
----------
table_type : string
The type of data to use when computing the emissivity values. If "cloudy",
a file called "cloudy_emissivity.h5" is used, for photoionized
plasmas. If, "apec", a file called "apec_emissivity.h5" is used for
collisionally ionized plasmas. These files contain emissivity tables
for primordial elements and for metals at solar metallicity for the
energy range 0.1 to 100 keV.
redshift : float, optional
The cosmological redshift of the source of the field. Default: 0.0.
data_dir : string, optional
The location to look for the data table in. If not supplied, the file
will be looked for in the location of the YT_DEST environment variable
or in the current working directory.
use_metals : boolean, optional
If set to True, the emissivity will include contributions from metals.
Default: True
"""
def __init__(self, table_type, redshift=0.0, data_dir=None, use_metals=True):
filename = _get_data_file(table_type, data_dir=data_dir)
only_on_root(mylog.info, "Loading emissivity data from %s", filename)
in_file = h5py.File(filename, mode="r")
if "info" in in_file.attrs:
only_on_root(mylog.info, parse_h5_attr(in_file, "info"))
if parse_h5_attr(in_file, "version") != data_version[table_type]:
raise ObsoleteDataException(table_type)
else:
only_on_root(
mylog.info,
"X-ray '%s' emissivity data version: %s."
% (table_type, parse_h5_attr(in_file, "version")),
)
self.log_T = in_file["log_T"][:]
self.emissivity_primordial = in_file["emissivity_primordial"][:]
if "log_nH" in in_file:
self.log_nH = in_file["log_nH"][:]
if use_metals:
self.emissivity_metals = in_file["emissivity_metals"][:]
self.ebin = YTArray(in_file["E"], "keV")
in_file.close()
self.dE = np.diff(self.ebin)
self.emid = 0.5 * (self.ebin[1:] + self.ebin[:-1]).to("erg")
self.redshift = redshift
def get_interpolator(self, data_type, e_min, e_max, energy=True):
data = getattr(self, f"emissivity_{data_type}")
if not energy:
data = data[..., :] / self.emid.v
e_min = YTQuantity(e_min, "keV") * (1.0 + self.redshift)
e_max = YTQuantity(e_max, "keV") * (1.0 + self.redshift)
if (e_min - self.ebin[0]) / e_min < -1e-3 or (
e_max - self.ebin[-1]
) / e_max > 1e-3:
raise EnergyBoundsException(self.ebin[0], self.ebin[-1])
e_is, e_ie = np.digitize([e_min, e_max], self.ebin)
e_is = np.clip(e_is - 1, 0, self.ebin.size - 1)
e_ie = np.clip(e_ie, 0, self.ebin.size - 1)
my_dE = self.dE[e_is:e_ie].copy()
# clip edge bins if the requested range is smaller
my_dE[0] -= e_min - self.ebin[e_is]
my_dE[-1] -= self.ebin[e_ie] - e_max
interp_data = (data[..., e_is:e_ie] * my_dE).sum(axis=-1)
if data.ndim == 2:
emiss = UnilinearFieldInterpolator(
np.log10(interp_data),
[self.log_T[0], self.log_T[-1]],
"log_T",
truncate=True,
)
else:
emiss = BilinearFieldInterpolator(
np.log10(interp_data),
[self.log_nH[0], self.log_nH[-1], self.log_T[0], self.log_T[-1]],
["log_nH", "log_T"],
truncate=True,
)
return emiss
def add_xray_emissivity_field(
ds,
e_min,
e_max,
redshift=0.0,
metallicity=("gas", "metallicity"),
table_type="cloudy",
data_dir=None,
cosmology=None,
dist=None,
ftype="gas",
):
r"""Create X-ray emissivity fields for a given energy range.
Parameters
----------
e_min : float
The minimum energy in keV for the energy band.
    e_max : float
        The maximum energy in keV for the energy band.
redshift : float, optional
The cosmological redshift of the source of the field. Default: 0.0.
metallicity : str or tuple of str or float, optional
Either the name of a metallicity field or a single floating-point
number specifying a spatially constant metallicity. Must be in
solar units. If set to None, no metals will be assumed. Default:
("gas", "metallicity")
table_type : string, optional
The type of emissivity table to be used when creating the fields.
Options are "cloudy" or "apec". Default: "cloudy"
data_dir : string, optional
The location to look for the data table in. If not supplied, the file
will be looked for in the location of the YT_DEST environment variable
or in the current working directory.
cosmology : :class:`~yt.utilities.cosmology.Cosmology`, optional
If set and redshift > 0.0, this cosmology will be used when computing the
cosmological dependence of the emission fields. If not set, yt's default
LCDM cosmology will be used.
dist : (value, unit) tuple or :class:`~yt.units.yt_array.YTQuantity`, optional
The distance to the source, used for making intensity fields. You should
only use this if your source is nearby (not cosmological). Default: None
ftype : string, optional
The field type to use when creating the fields, default "gas"
This will create at least three fields:
"xray_emissivity_{e_min}_{e_max}_keV" (erg s^-1 cm^-3)
"xray_luminosity_{e_min}_{e_max}_keV" (erg s^-1)
"xray_photon_emissivity_{e_min}_{e_max}_keV" (photons s^-1 cm^-3)
and if a redshift or distance is specified it will create two others:
"xray_intensity_{e_min}_{e_max}_keV" (erg s^-1 cm^-3 arcsec^-2)
"xray_photon_intensity_{e_min}_{e_max}_keV" (photons s^-1 cm^-3 arcsec^-2)
These latter two are really only useful when making projections.
Examples
--------
>>> import yt
>>> ds = yt.load("sloshing_nomag2_hdf5_plt_cnt_0100")
>>> yt.add_xray_emissivity_field(ds, 0.5, 2)
>>> p = yt.ProjectionPlot(
... ds, "x", ("gas", "xray_emissivity_0.5_2_keV"), table_type="apec"
... )
>>> p.save()
"""
if not isinstance(metallicity, float) and metallicity is not None:
try:
metallicity = ds._get_field_info(*metallicity)
except YTFieldNotFound as e:
raise RuntimeError(
f"Your dataset does not have a {metallicity} field! "
+ "Perhaps you should specify a constant metallicity instead?"
) from e
if table_type == "cloudy":
# Cloudy wants to scale by nH**2
other_n = "H_nuclei_density"
else:
# APEC wants to scale by nH*ne
other_n = "El_number_density"
def _norm_field(field, data):
return data[ftype, "H_nuclei_density"] * data[ftype, other_n]
ds.add_field(
(ftype, "norm_field"), _norm_field, units="cm**-6", sampling_type="local"
)
my_si = XrayEmissivityIntegrator(table_type, data_dir=data_dir, redshift=redshift)
em_0 = my_si.get_interpolator("primordial", e_min, e_max)
emp_0 = my_si.get_interpolator("primordial", e_min, e_max, energy=False)
if metallicity is not None:
em_Z = my_si.get_interpolator("metals", e_min, e_max)
emp_Z = my_si.get_interpolator("metals", e_min, e_max, energy=False)
def _emissivity_field(field, data):
with np.errstate(all="ignore"):
dd = {
"log_nH": np.log10(data[ftype, "H_nuclei_density"]),
"log_T": np.log10(data[ftype, "temperature"]),
}
my_emissivity = np.power(10, em_0(dd))
if metallicity is not None:
if isinstance(metallicity, DerivedField):
my_Z = data[metallicity.name].to("Zsun")
else:
my_Z = metallicity
my_emissivity += my_Z * np.power(10, em_Z(dd))
my_emissivity[np.isnan(my_emissivity)] = 0
return data[ftype, "norm_field"] * YTArray(my_emissivity, "erg*cm**3/s")
emiss_name = (ftype, f"xray_emissivity_{e_min}_{e_max}_keV")
ds.add_field(
emiss_name,
function=_emissivity_field,
display_name=fr"\epsilon_{{X}} ({e_min}-{e_max} keV)",
sampling_type="local",
units="erg/cm**3/s",
)
def _luminosity_field(field, data):
return data[emiss_name] * data[ftype, "mass"] / data[ftype, "density"]
lum_name = (ftype, f"xray_luminosity_{e_min}_{e_max}_keV")
ds.add_field(
lum_name,
function=_luminosity_field,
display_name=fr"\rm{{L}}_{{X}} ({e_min}-{e_max} keV)",
sampling_type="local",
units="erg/s",
)
def _photon_emissivity_field(field, data):
dd = {
"log_nH": np.log10(data[ftype, "H_nuclei_density"]),
"log_T": np.log10(data[ftype, "temperature"]),
}
my_emissivity = np.power(10, emp_0(dd))
if metallicity is not None:
if isinstance(metallicity, DerivedField):
my_Z = data[metallicity.name].to("Zsun")
else:
my_Z = metallicity
my_emissivity += my_Z * np.power(10, emp_Z(dd))
return data[ftype, "norm_field"] * YTArray(my_emissivity, "photons*cm**3/s")
phot_name = (ftype, f"xray_photon_emissivity_{e_min}_{e_max}_keV")
ds.add_field(
phot_name,
function=_photon_emissivity_field,
display_name=fr"\epsilon_{{X}} ({e_min}-{e_max} keV)",
sampling_type="local",
units="photons/cm**3/s",
)
fields = [emiss_name, lum_name, phot_name]
if redshift > 0.0 or dist is not None:
if dist is None:
if cosmology is None:
if hasattr(ds, "cosmology"):
cosmology = ds.cosmology
else:
cosmology = Cosmology()
D_L = cosmology.luminosity_distance(0.0, redshift)
angular_scale = 1.0 / cosmology.angular_scale(0.0, redshift)
dist_fac = ds.quan(
1.0 / (4.0 * np.pi * D_L * D_L * angular_scale * angular_scale).v,
"rad**-2",
)
else:
redshift = 0.0 # Only for local sources!
try:
# normal behaviour, if dist is a YTQuantity
dist = ds.quan(dist.value, dist.units)
except AttributeError as e:
try:
dist = ds.quan(*dist)
except (RuntimeError, TypeError):
raise TypeError(
"dist should be a YTQuantity or a (value, unit) tuple!"
) from e
angular_scale = dist / ds.quan(1.0, "radian")
dist_fac = ds.quan(
1.0 / (4.0 * np.pi * dist * dist * angular_scale * angular_scale).v,
"rad**-2",
)
ei_name = (ftype, f"xray_intensity_{e_min}_{e_max}_keV")
def _intensity_field(field, data):
I = dist_fac * data[emiss_name]
return I.in_units("erg/cm**3/s/arcsec**2")
ds.add_field(
ei_name,
function=_intensity_field,
display_name=fr"I_{{X}} ({e_min}-{e_max} keV)",
sampling_type="local",
units="erg/cm**3/s/arcsec**2",
)
i_name = (ftype, f"xray_photon_intensity_{e_min}_{e_max}_keV")
def _photon_intensity_field(field, data):
I = (1.0 + redshift) * dist_fac * data[phot_name]
return I.in_units("photons/cm**3/s/arcsec**2")
ds.add_field(
i_name,
function=_photon_intensity_field,
display_name=fr"I_{{X}} ({e_min}-{e_max} keV)",
sampling_type="local",
units="photons/cm**3/s/arcsec**2",
)
fields += [ei_name, i_name]
for field in fields:
mylog.info("Adding ('%s','%s') field.", field[0], field[1])
return fields
|
docs/tutorial/helpers/filternet_images_helpers.py | aaberbach/bmtk | 216 | 11074247 | import matplotlib.pyplot as plt
from bmtk.simulator.filternet.lgnmodel.temporalfilter import TemporalFilterCosineBump, TemporalFilter
from bmtk.simulator.filternet.lgnmodel.spatialfilter import GaussianSpatialFilter
from bmtk.simulator.filternet.lgnmodel import movie
#################################################################################
#### Functions for generating some of the figures in the filternet notebook #####
#################################################################################
def plot_tfilter_params():
gm = movie.GratingMovie(120, 240)
mv = gm.create_movie(t_max=2.0)
#
# tf = TemporalFilterCosineBump(weights=[33.328, -20.10059],
# kpeaks=[59.0, 120.0], # [9.67, 20.03],
# delays=[0.0, 0.0])
#
# tf.get_kernel(t_range=mv.t_range, threshold=0.0, reverse=True)
# tf.imshow(t_range=mv.t_range, reverse=True)
fig, axes = plt.subplots(3, 3, figsize=(10, 7))
ri = ci = 0
weights = [(30.0, -20.0), (30.0, -1.0), (15.0, -20.0)]
kpeaks = [(3.0, 5.0), (3.0, 30.0), (20.0, 40.0)]
delays = [(0.0, 0.0), (0.0, 60.0), (20.0, 60.0)]
# weights control the amplitude of the peaks
for ci, w in enumerate(weights):
tf = TemporalFilterCosineBump(weights=w,
kpeaks=[9.67, 20.03],
delays=[0.0, 1.0])
linear_kernel = tf.get_kernel(t_range=mv.t_range, reverse=True)
axes[ri, ci].plot(linear_kernel.t_range[linear_kernel.t_inds], linear_kernel.kernel)
axes[ri, ci].set_ylim([-3.5, 10.0])
axes[ri, ci].text(0.05, 0.90, 'weights={}'.format(w), horizontalalignment='left', verticalalignment='top',
transform=axes[ri, ci].transAxes)
axes[0, 0].set_ylabel('effect of weights')
ri += 1
    # the kpeaks parameters control the spread of both peaks; the second peak must have the larger spread
for ci, kp in enumerate(kpeaks):
tf = TemporalFilterCosineBump(weights=[30.0, -20.0],
kpeaks=kp,
delays=[0.0, 1.0])
linear_kernel = tf.get_kernel(t_range=mv.t_range, reverse=True)
axes[ri, ci].plot(linear_kernel.t_range[linear_kernel.t_inds], linear_kernel.kernel)
axes[ri, ci].set_xlim([-0.15, 0.005])
axes[ri, ci].text(0.05, 0.90, 'kpeaks={}'.format(kp), horizontalalignment='left', verticalalignment='top',
transform=axes[ri, ci].transAxes)
axes[1, 0].set_ylabel('effects of kpeaks')
ri += 1
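    # delays offset the onset time of each cosine bump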
for ci, d in enumerate(delays):
tf = TemporalFilterCosineBump(weights=[30.0, -20.0],
kpeaks=[9.67, 20.03],
delays=d)
linear_kernel = tf.get_kernel(t_range=mv.t_range, reverse=True)
axes[ri, ci].plot(linear_kernel.t_range[linear_kernel.t_inds], linear_kernel.kernel)
axes[ri, ci].set_xlim([-0.125, 0.001])
axes[ri, ci].text(0.05, 0.90, 'delays={}'.format(d), horizontalalignment='left', verticalalignment='top',
transform=axes[ri, ci].transAxes)
axes[2, 0].set_ylabel('effects of delays')
# plt.xlim()
plt.show()
def plot_sfilter_params():
gm = movie.GratingMovie(200, 200)
mv = gm.create_movie(t_max=2.0)
fig, axes = plt.subplots(2, 2, figsize=(7, 7))
rotations = [0.0, 45.0]
sigmas = [(30.0, 20.0), (20.0, 30.0)]
for r, sigma in enumerate(sigmas):
for c, rot in enumerate(rotations):
gsf = GaussianSpatialFilter(translate=(0, 0), sigma=sigma, rotation=rot)
axes[r, c].imshow(gsf.get_kernel(mv.row_range, mv.col_range).full(), extent=(0, 200, 0, 200), origin='lower')
if r == 0:
axes[r, c].title.set_text('spatial_rotation={}'.format(rot))
if c == 0:
axes[r, c].set_ylabel('spatial_size={}'.format(sigma))
plt.show()
if __name__ == '__main__':
# plot_tfilter_params()
plot_sfilter_params()
# tf.imshow(t_range=mv.t_range, reverse=True) |
config.py | sgarbirodrigo/ml-sound-classifier | 118 | 11074251 | # Freesound Dataset Kaggle 2018
# Application configurations
from easydict import EasyDict
conf = EasyDict()
# Basic configurations
conf.sampling_rate = 44100
conf.duration = 1
conf.hop_length = 347 # to make time steps 128
conf.fmin = 20
conf.fmax = conf.sampling_rate // 2
conf.n_mels = 128
conf.n_fft = conf.n_mels * 20
conf.model = 'mobilenetv2' # 'alexnet'
# Labels
conf.labels = ['Hi-hat', 'Saxophone', 'Trumpet', 'Glockenspiel', 'Cello', 'Knock',
'Gunshot_or_gunfire', 'Clarinet', 'Computer_keyboard',
'Keys_jangling', 'Snare_drum', 'Writing', 'Laughter', 'Tearing',
'Fart', 'Oboe', 'Flute', 'Cough', 'Telephone', 'Bark', 'Chime',
'Bass_drum', 'Bus', 'Squeak', 'Scissors', 'Harmonica', 'Gong',
'Microwave_oven', 'Burping_or_eructation', 'Double_bass', 'Shatter',
'Fireworks', 'Tambourine', 'Cowbell', 'Electric_piano', 'Meow',
'Drawer_open_or_close', 'Applause', 'Acoustic_guitar',
'Violin_or_fiddle', 'Finger_snapping']
# Training configurations
conf.folder = '.'
conf.n_fold = 1
conf.normalize = 'samplewise'
conf.valid_limit = None
conf.random_state = 42
conf.test_size = 0.01
conf.samples_per_file = 5
conf.batch_size = 32
conf.learning_rate = 0.0001
conf.epochs = 500
conf.verbose = 2
conf.best_weight_file = 'best_mobilenetv2_weight.h5'
# Runtime configurations
conf.rt_process_count = 1
conf.rt_oversamples = 10
conf.pred_ensembles = 10
conf.runtime_model_file = 'model/mobilenetv2_fsd2018_41cls.pb'
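# A minimal sketch of how these spectrogram settings would typically be consumed
# (hypothetical usage with librosa, not part of this module):
#
#   import librosa
#   y, _ = librosa.load('sample.wav', sr=conf.sampling_rate, duration=conf.duration)
#   mels = librosa.feature.melspectrogram(y=y, sr=conf.sampling_rate,
#                                         n_mels=conf.n_mels, n_fft=conf.n_fft,
#                                         hop_length=conf.hop_length,
#                                         fmin=conf.fmin, fmax=conf.fmax)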
|
random_bonus/gan_n_cgan_2d_example/sampler.py | wang420349864/dlcv_for_beginners | 1,424 | 11074260 | from functools import partial
import numpy
from skimage import transform
EPS = 1e-66
RESOLUTION = 0.001
num_grids = int(1/RESOLUTION+0.5)
def generate_lut(img):
"""
linear approximation of CDF & marginal
:param density_img:
:return: lut_y, lut_x
"""
density_img = transform.resize(img, (num_grids, num_grids))
x_accumlation = numpy.sum(density_img, axis=1)
sum_xy = numpy.sum(x_accumlation)
y_cdf_of_accumulated_x = [[0., 0.]]
accumulated = 0
for ir, i in enumerate(range(num_grids-1, -1, -1)):
accumulated += x_accumlation[i]
if accumulated == 0:
y_cdf_of_accumulated_x[0][0] = float(ir+1)/float(num_grids)
elif EPS < accumulated < sum_xy - EPS:
y_cdf_of_accumulated_x.append([float(ir+1)/float(num_grids), accumulated/sum_xy])
else:
break
y_cdf_of_accumulated_x.append([float(ir+1)/float(num_grids), 1.])
y_cdf_of_accumulated_x = numpy.array(y_cdf_of_accumulated_x)
x_cdfs = []
for j in range(num_grids):
x_freq = density_img[num_grids-j-1]
sum_x = numpy.sum(x_freq)
x_cdf = [[0., 0.]]
accumulated = 0
for i in range(num_grids):
accumulated += x_freq[i]
if accumulated == 0:
x_cdf[0][0] = float(i+1) / float(num_grids)
elif EPS < accumulated < sum_xy - EPS:
x_cdf.append([float(i+1)/float(num_grids), accumulated/sum_x])
else:
break
x_cdf.append([float(i+1)/float(num_grids), 1.])
if accumulated > EPS:
x_cdf = numpy.array(x_cdf)
x_cdfs.append(x_cdf)
else:
x_cdfs.append(None)
y_lut = partial(numpy.interp, xp=y_cdf_of_accumulated_x[:, 1], fp=y_cdf_of_accumulated_x[:, 0])
x_luts = [partial(numpy.interp, xp=x_cdfs[i][:, 1], fp=x_cdfs[i][:, 0]) if x_cdfs[i] is not None else None for i in range(num_grids)]
return y_lut, x_luts
def sample_2d(lut, N):
y_lut, x_luts = lut
u_rv = numpy.random.random((N, 2))
samples = numpy.zeros(u_rv.shape)
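    # Inverse-transform sampling: the second uniform coordinate is mapped to y via
    # the marginal LUT, the first to x via that row's conditional LUT.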
for i, (x, y) in enumerate(u_rv):
ys = y_lut(y)
x_bin = int(ys/RESOLUTION)
xs = x_luts[x_bin](x)
samples[i][0] = xs
samples[i][1] = ys
return samples
if __name__ == '__main__':
from skimage import io
density_img = io.imread('inputs/random.jpg', True)
lut_2d = generate_lut(density_img)
samples = sample_2d(lut_2d, 10000)
from matplotlib import pyplot
fig, (ax0, ax1) = pyplot.subplots(ncols=2, figsize=(9, 4))
fig.canvas.set_window_title('Test 2D Sampling')
ax0.imshow(density_img, cmap='gray')
ax0.xaxis.set_major_locator(pyplot.NullLocator())
ax0.yaxis.set_major_locator(pyplot.NullLocator())
ax1.axis('equal')
ax1.axis([0, 1, 0, 1])
ax1.plot(samples[:, 0], samples[:, 1], 'k,')
pyplot.show()
|
tests/test_link_options.py | rubensmau/splink | 176 | 11074265 | import pytest
from pyspark.sql import Row
from splink.blocking import block_using_rules
from splink.default_settings import complete_settings_dict
from splink.vertically_concat import vertically_concatenate_datasets
@pytest.fixture(scope="module")
def link_dedupe_data(spark):
# fmt: off
data_l = [
{ "source_dataset": "l", "unique_id": 1, "surname": "Linacre", "first_name": "Robin", },
{ "source_dataset": "l", "unique_id": 2, "surname": "Smith", "first_name": "John", },
]
data_r = [
{ "source_dataset": "r", "unique_id": 7, "surname": "Linacre", "first_name": "Robin", },
{ "source_dataset": "r", "unique_id": 8, "surname": "Smith", "first_name": "John", },
{ "source_dataset": "r", "unique_id": 9, "surname": "Smith", "first_name": "Robin", },
]
# fmt: on
df_l = spark.createDataFrame(Row(**x) for x in data_l)
df_r = spark.createDataFrame(Row(**x) for x in data_r)
data_l.extend(data_r)
df = spark.createDataFrame(Row(**x) for x in data_l)
yield {"df": df, "df_l": df_l, "df_r": df_r}
@pytest.fixture(scope="module")
def link_dedupe_data_repeat_ids(spark):
# fmt: off
data_l = [
{"source_dataset": "l", "unique_id": 1, "surname": "Linacre", "first_name": "Robin"},
{"source_dataset": "l", "unique_id": 2, "surname": "Smith", "first_name": "John"},
{"source_dataset": "l", "unique_id": 3, "surname": "Smith", "first_name": "John"},
]
data_r = [
{"source_dataset": "r", "unique_id": 1, "surname": "Linacre", "first_name": "Robin"},
{"source_dataset": "r", "unique_id": 2, "surname": "Smith", "first_name": "John"},
{"source_dataset": "r", "unique_id": 3, "surname": "Smith", "first_name": "Robin"},
]
# fmt: on
df_l = spark.createDataFrame(Row(**x) for x in data_l)
df_r = spark.createDataFrame(Row(**x) for x in data_r)
data_l.extend(data_r)
df = spark.createDataFrame(Row(**x) for x in data_l)
yield {"df": df, "df_l": df_l, "df_r": df_r}
def test_no_blocking(spark, link_dedupe_data):
settings = {
"link_type": "link_only",
"comparison_columns": [{"col_name": "first_name"}, {"col_name": "surname"}],
"blocking_rules": [],
}
settings = complete_settings_dict(settings, spark)
df_l = link_dedupe_data["df_l"]
df_r = link_dedupe_data["df_r"]
df = vertically_concatenate_datasets([df_l, df_r])
df_comparison = block_using_rules(settings, df, spark)
df = df_comparison.toPandas()
df = df.sort_values(["unique_id_l", "unique_id_r"])
assert list(df["unique_id_l"]) == [1, 1, 1, 2, 2, 2]
assert list(df["unique_id_r"]) == [7, 8, 9, 7, 8, 9]
def test_link_only(spark, link_dedupe_data, link_dedupe_data_repeat_ids):
settings = {
"link_type": "link_only",
"comparison_columns": [{"col_name": "first_name"}, {"col_name": "surname"}],
"blocking_rules": ["l.first_name = r.first_name", "l.surname = r.surname"],
}
settings = complete_settings_dict(settings, spark)
df_l = link_dedupe_data["df_l"]
df_r = link_dedupe_data["df_r"]
df = vertically_concatenate_datasets([df_l, df_r])
df_comparison = block_using_rules(settings, df, spark)
df = df_comparison.toPandas()
df = df.sort_values(["unique_id_l", "unique_id_r"])
assert list(df["unique_id_l"]) == [1, 1, 2, 2]
assert list(df["unique_id_r"]) == [7, 9, 8, 9]
df_l = link_dedupe_data_repeat_ids["df_l"]
df_r = link_dedupe_data_repeat_ids["df_r"]
df = vertically_concatenate_datasets([df_l, df_r])
df_comparison = block_using_rules(settings, df, spark)
df = df_comparison.toPandas()
df = df.sort_values(["unique_id_l", "unique_id_r"])
assert list(df["unique_id_l"]) == [1, 1, 2, 2, 3, 3]
assert list(df["unique_id_r"]) == [1, 3, 2, 3, 2, 3]
settings = {
"link_type": "link_only",
"comparison_columns": [{"col_name": "first_name"}, {"col_name": "surname"}],
"blocking_rules": [],
}
settings = complete_settings_dict(settings, spark)
df = vertically_concatenate_datasets([df_l, df_r])
df_comparison = block_using_rules(settings, df, spark)
df = df_comparison.toPandas()
df = df.sort_values(["unique_id_l", "unique_id_r"])
assert list(df["unique_id_l"]) == [1, 1, 1, 2, 2, 2, 3, 3, 3]
assert list(df["unique_id_r"]) == [1, 2, 3, 1, 2, 3, 1, 2, 3]
def test_link_dedupe(spark, link_dedupe_data, link_dedupe_data_repeat_ids):
settings = {
"link_type": "link_and_dedupe",
"comparison_columns": [{"col_name": "first_name"}, {"col_name": "surname"}],
"blocking_rules": ["l.first_name = r.first_name", "l.surname = r.surname"],
}
settings = complete_settings_dict(settings, spark=spark)
df_l = link_dedupe_data["df_l"]
df_r = link_dedupe_data["df_r"]
df = vertically_concatenate_datasets([df_l, df_r])
df_comparison = block_using_rules(settings, df, spark)
df = df_comparison.toPandas()
df = df.sort_values(["unique_id_l", "unique_id_r"])
assert list(df["unique_id_l"]) == [1, 1, 2, 2, 7, 8]
assert list(df["unique_id_r"]) == [7, 9, 8, 9, 9, 9]
df_l = link_dedupe_data_repeat_ids["df_l"]
df_r = link_dedupe_data_repeat_ids["df_r"]
df = vertically_concatenate_datasets([df_l, df_r])
df = block_using_rules(settings, df, spark)
df = df.toPandas()
df["u_l"] = df["unique_id_l"].astype(str) + df["source_dataset_l"].str.slice(0, 1)
df["u_r"] = df["unique_id_r"].astype(str) + df["source_dataset_r"].str.slice(0, 1)
df = df.sort_values(
["source_dataset_l", "source_dataset_r", "unique_id_l", "unique_id_r"]
)
assert list(df["u_l"]) == ["2l", "1l", "1l", "2l", "2l", "3l", "3l", "1r", "2r"]
assert list(df["u_r"]) == ["3l", "1r", "3r", "2r", "3r", "2r", "3r", "3r", "3r"]
settings = {
"link_type": "link_and_dedupe",
"comparison_columns": [{"col_name": "first_name"}, {"col_name": "surname"}],
"blocking_rules": [],
}
settings = complete_settings_dict(settings, spark=spark)
df_l = link_dedupe_data_repeat_ids["df_l"]
df_r = link_dedupe_data_repeat_ids["df_r"]
df = vertically_concatenate_datasets([df_l, df_r])
df = block_using_rules(settings, df, spark)
df = df.toPandas()
df["u_l"] = df["unique_id_l"].astype(str) + df["source_dataset_l"].str.slice(0, 1)
df["u_r"] = df["unique_id_r"].astype(str) + df["source_dataset_r"].str.slice(0, 1)
df = df.sort_values(
["source_dataset_l", "unique_id_l", "source_dataset_r", "unique_id_r"]
)
# fmt: off
assert list(df["u_l"]) == ["1l", "1l", "1l", "1l", "1l", "2l", "2l", "2l", "2l", "3l", "3l", "3l", "1r", "1r", "2r"]
assert list(df["u_r"]) == ["2l", "3l", "1r", "2r", "3r", "3l", "1r", "2r", "3r", "1r", "2r", "3r", "2r", "3r", "3r"]
# fmt: on
def test_dedupe(spark, link_dedupe_data_repeat_ids):
    # This test checks that we only get one result when a comparison is hit by multiple blocking rules
settings = {
"link_type": "dedupe_only",
"comparison_columns": [{"col_name": "first_name"}, {"col_name": "surname"}],
"blocking_rules": ["l.first_name = r.first_name", "l.surname = r.surname"],
}
settings = complete_settings_dict(settings, spark=None)
df_l = link_dedupe_data_repeat_ids["df_l"]
df = block_using_rules(settings, df_l, spark)
df = df.toPandas()
df = df.sort_values(["unique_id_l", "unique_id_r"])
assert list(df["unique_id_l"]) == [2]
assert list(df["unique_id_r"]) == [3]
# Is the source dataset column retained if it exists?
assert "source_dataset_l" in list(df.columns)
df_l = link_dedupe_data_repeat_ids["df_l"]
df_l = df_l.drop("source_dataset")
df = block_using_rules(settings, df_l, spark)
# Is the source dataset column excluded if it doesn't exist?
assert "source_dataset_l" not in list(df.columns)
    # Is the source dataset column included if it has a different name?
df_l = link_dedupe_data_repeat_ids["df_l"]
df_l = df_l.withColumnRenamed("source_dataset", "source_ds")
settings["source_dataset_column_name"] = "source_ds"
df = block_using_rules(settings, df_l, spark)
    # Is the renamed source dataset column included (and the default name absent)?
assert "source_ds_l" in list(df.columns)
assert "source_dataset_l" not in list(df.columns)
|
img/facilityLocationGIF.py | wfondrie/apricot | 389 | 11074278 | <reponame>wfondrie/apricot<filename>img/facilityLocationGIF.py
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import numpy
numpy.random.seed(0)
import seaborn; seaborn.set_style('whitegrid')
from apricot import FacilityLocationSelection
numpy.random.seed(0)
X = numpy.concatenate([numpy.random.normal((1, 1), 0.5, size=(15, 2)),
numpy.random.normal((6, 3), 0.5, size=(25, 2)),
numpy.random.normal((5, 7), 0.5, size=(40, 2)),
numpy.random.normal((1, 7), 0.5, size=(30, 2)),
numpy.random.normal((10, 4), 0.5, size=(15, 2)),
numpy.random.normal((3, 4), 0.5, size=(15, 2))])
Xi = FacilityLocationSelection(6, 'euclidean').fit_transform(X)
Xr = numpy.random.choice(numpy.arange(X.shape[0]), size=6)
Xr = X[Xr]
fig = plt.figure(figsize=(8, 6))
ax = plt.subplot(111)
ax.scatter(X[:,0], X[:,1], s=10)
ax.legend(fontsize=14)
ax.set_xlim(-1, 14)
#ax.set_ylim(0, 7)
ax.axis('off')
plt.grid(False)
seaborn.despine(ax=ax)
fig.set_tight_layout(True)
def update(i):
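    # Redraw each frame: reveal the first i points chosen by the submodular
    # (facility location) selection alongside the first i random selections.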
ax.clear()
ax.scatter(X[:,0], X[:,1], s=10)
ax.scatter(Xi[:i,0], Xi[:i,1], color="#FF6600", label="Submodular Selection")
ax.scatter(Xr[:i,0], Xr[:i,1], color="#8A2BE2", label="Random Selection", alpha=0.6)
ax.legend(fontsize=14)
ax.set_xlim(-1, 14)
ax.axis('off')
plt.grid(False)
return ax
anim = FuncAnimation(fig, update, frames=range(7), interval=1000)
anim.save('fl.gif', dpi=80, writer='imagemagick') |
classic_heuristics/gaskell_savings.py | juicetinliu/VeRyPy | 156 | 11074287 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
""" This file is a part of the VeRyPy classical vehicle routing problem
heuristic library and provides implementations of the Gaskell (1967)
\pi and \lambda savings functions for parallel (as in multiple route)
savings heuristic.
The script is callable and can be used as a standalone solver for TSPLIB
formatted CVRPs. It has moderate dependencies: parallel savings heuristic
procedure from parallel_savings.py, built-in TSP solver, and numpy and scipy
for reading and preparing the problem instance."""
###############################################################################
# Written in Python 2.7, but try to maintain Python 3+ compatibility
from builtins import range
import numpy as np
from util import objf, is_better_sol
from classic_heuristics.parallel_savings import parallel_savings_init
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, Jussi Rasku"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
def gaskell_lambda_savings_function(D):
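    # Gaskell's lambda criterion: lambda_ij = s_ij * (d_avg + |d_0i - d_0j| - d_ij),
    # where s_ij = d_i0 + d_0j - d_ij is the Clarke-Wright saving and d_avg is the
    # average of the distance matrix; larger values are merged first.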
n = len(D)-1
savings = [None]*int((n*n-n)/2)
idx = 0
d_avg = np.average(D[0:])
for i in range(1,n+1):
for j in range(i+1,n+1):
s_AB = D[i,0]+D[0,j]-D[i,j]
lambda_AB = s_AB*(d_avg+abs(D[0,i]-D[0,j])-D[i,j])
savings[idx] = (lambda_AB, -D[i,j], i,j)
idx+=1
savings.sort(reverse=True)
return savings
def gaskell_pi_savings_function(D):
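    # Gaskell's pi criterion: pi_ij = d_i0 + d_0j - 2*d_ij; larger values are
    # merged first.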
n = len(D)-1
savings = [None]*int((n*n-n)/2)
idx = 0
for i in range(1,n+1):
for j in range(i+1,n+1):
pi_AB = D[i,0]+D[0,j]-2*D[i,j]
savings[idx] = (pi_AB, -D[i,j], i,j)
idx+=1
savings.sort(reverse=True)
return savings
def gaskell_savings_init(D,d,C,L, minimize_K=False, savings_method="both"):
""" Savings algorithm with Gaskell (1967) pi and lambda savings criteria.
Uses parallel_savings.py for the savings procedure.
    * D is a numpy ndarray (or equivalent) of the full 2D distance matrix.
* d is a list of demands. d[0] should be 0.0 as it is the depot.
* C is the capacity constraint limit for the identical vehicles.
* L is the optional constraint for the maximum route length/duration/cost.
* minimize_K sets the primary optimization objective. If set to True, it is
the minimum number of routes. If set to False (default) the algorithm
     optimizes for the minimum solution/routing cost. In savings algorithms
this is done by ignoring negative savings values.
* savings_method selects the savings criteria: "lambda" or "pi". If set to
"both" (default) the one with better results is returned.
<NAME>. (1967). Bases for vehicle fleet scheduling. Journal of the
Operational Research Society, 18(3):281-295.
"""
savings_functions = []
if savings_method=="both":
savings_functions = [gaskell_lambda_savings_function,
gaskell_pi_savings_function]
elif savings_method=="lambda":
savings_functions = [gaskell_lambda_savings_function]
elif savings_method=="pi":
savings_functions = [gaskell_pi_savings_function]
else:
raise ValueError("Only 'lambda', 'pi', or 'both' are supported.")
best_sol = None
best_f = None
best_K = None
interrupted = False
for sav_f in savings_functions:
sol, sol_f, sol_K = None, float('inf'), float('inf')
try:
sol = parallel_savings_init(D,d,C,L,minimize_K,sav_f)
except KeyboardInterrupt as e: #or SIGINT
# lambda or pi was interrupted
if len(e.args)>0 and type(e.args[0]) is list:
sol = e.args[0]
interrupted = True
if sol:
sol_f = objf(sol, D)
sol_K = sol.count(0)-1
if is_better_sol(best_f, best_K, sol_f, sol_K, minimize_K):
best_sol = sol
best_f = sol_f
best_K = sol_K
if interrupted:
# pass on the current best_sol
raise KeyboardInterrupt(best_sol)
return best_sol
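# A minimal usage sketch (illustrative values only; assumes a symmetric distance
# matrix D with the depot at index 0, a demand list d, and vehicle capacity C):
#
#   import numpy as np
#   D = np.array([[0., 3., 4., 5.],
#                 [3., 0., 2., 6.],
#                 [4., 2., 0., 3.],
#                 [5., 6., 3., 0.]])
#   d = [0.0, 1.0, 1.0, 1.0]
#   routes = gaskell_savings_init(D, d, C=2.0, L=None, savings_method="pi")
#   # routes is expected to be a single list with 0 marking depot visits.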
# ---------------------------------------------------------------------
# Wrapper for the command line user interface (CLI)
def get_gs_algorithm():
algo_name = r"Ga67-PS|pi+lamda"
algo_desc = r"Parallel savings algorithm with Gaskell (1967) $\pi$ and "+\
r"$\lambda$ criteria"
def call_init(points, D, d, C, L, st, wtt, single, minimize_K):
savings_method = "both" if not single else "pi"
return gaskell_savings_init(D,d,C,L, minimize_K, savings_method)
return (algo_name, algo_desc, call_init)
if __name__=="__main__":
from shared_cli import cli
cli(*get_gs_algorithm())
|
autoaugment/policies.py | msf235/sam | 362 | 11074297 | <filename>autoaugment/policies.py<gh_stars>100-1000
# Copyright 2020 The Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AutoAugment policies for Imagenet, Cifar and SVHN."""
from typing import List, Tuple
def policy_imagenet() -> List[List[Tuple[str, float, int]]]:
"""Returns the autoaugment policy that was used in AutoAugment Paper.
A policy is composed of two augmentations applied sequentially to the image.
Each augmentation is described as a tuple where the first element is the
type of transformation to apply, the second is the probability with which the
augmentation should be applied, and the third element is the strength of the
transformation.
"""
policy = [
[('Equalize', 0.8, 1), ('ShearY', 0.8, 4)],
[('Color', 0.4, 9), ('Equalize', 0.6, 3)],
[('Color', 0.4, 1), ('Rotate', 0.6, 8)],
[('Solarize', 0.8, 3), ('Equalize', 0.4, 7)],
[('Solarize', 0.4, 2), ('Solarize', 0.6, 2)],
[('Color', 0.2, 0), ('Equalize', 0.8, 8)],
[('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)],
[('ShearX', 0.2, 9), ('Rotate', 0.6, 8)],
[('Color', 0.6, 1), ('Equalize', 1.0, 2)],
[('Invert', 0.4, 9), ('Rotate', 0.6, 0)],
[('Equalize', 1.0, 9), ('ShearY', 0.6, 3)],
[('Color', 0.4, 7), ('Equalize', 0.6, 0)],
[('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)],
[('Solarize', 0.6, 8), ('Color', 0.6, 9)],
[('Solarize', 0.2, 4), ('Rotate', 0.8, 9)],
[('Rotate', 1.0, 7), ('TranslateY', 0.8, 9)],
[('ShearX', 0.0, 0), ('Solarize', 0.8, 4)],
[('ShearY', 0.8, 0), ('Color', 0.6, 4)],
[('Color', 1.0, 0), ('Rotate', 0.6, 2)],
[('Equalize', 0.8, 4), ('Equalize', 0.0, 8)],
[('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)],
[('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)],
[('Posterize', 0.8, 2), ('Solarize', 0.6, 10)],
[('Solarize', 0.6, 8), ('Equalize', 0.6, 1)],
[('Color', 0.8, 6), ('Rotate', 0.4, 5)],
]
return policy
def policy_cifar() -> List[List[Tuple[str, float, int]]]:
"""Returns the AutoAugment policies found on Cifar.
A policy is composed of two augmentations applied sequentially to the image.
Each augmentation is described as a tuple where the first element is the
type of transformation to apply, the second is the probability with which the
augmentation should be applied, and the third element is the strength of the
transformation.
"""
exp0_0 = [
[('Invert', 0.1, 7), ('Contrast', 0.2, 6)],
[('Rotate', 0.7, 2), ('TranslateX', 0.3, 9)],
[('Sharpness', 0.8, 1), ('Sharpness', 0.9, 3)],
[('ShearY', 0.5, 8), ('TranslateY', 0.7, 9)],
[('AutoContrast', 0.5, 8), ('Equalize', 0.9, 2)]]
exp0_1 = [
[('Solarize', 0.4, 5), ('AutoContrast', 0.9, 3)],
[('TranslateY', 0.9, 9), ('TranslateY', 0.7, 9)],
[('AutoContrast', 0.9, 2), ('Solarize', 0.8, 3)],
[('Equalize', 0.8, 8), ('Invert', 0.1, 3)],
[('TranslateY', 0.7, 9), ('AutoContrast', 0.9, 1)]]
exp0_2 = [
[('Solarize', 0.4, 5), ('AutoContrast', 0.0, 2)],
[('TranslateY', 0.7, 9), ('TranslateY', 0.7, 9)],
[('AutoContrast', 0.9, 0), ('Solarize', 0.4, 3)],
[('Equalize', 0.7, 5), ('Invert', 0.1, 3)],
[('TranslateY', 0.7, 9), ('TranslateY', 0.7, 9)]]
exp0_3 = [
[('Solarize', 0.4, 5), ('AutoContrast', 0.9, 1)],
[('TranslateY', 0.8, 9), ('TranslateY', 0.9, 9)],
[('AutoContrast', 0.8, 0), ('TranslateY', 0.7, 9)],
[('TranslateY', 0.2, 7), ('Color', 0.9, 6)],
[('Equalize', 0.7, 6), ('Color', 0.4, 9)]]
exp1_0 = [
[('ShearY', 0.2, 7), ('Posterize', 0.3, 7)],
[('Color', 0.4, 3), ('Brightness', 0.6, 7)],
[('Sharpness', 0.3, 9), ('Brightness', 0.7, 9)],
[('Equalize', 0.6, 5), ('Equalize', 0.5, 1)],
[('Contrast', 0.6, 7), ('Sharpness', 0.6, 5)]]
exp1_1 = [
[('Brightness', 0.3, 7), ('AutoContrast', 0.5, 8)],
[('AutoContrast', 0.9, 4), ('AutoContrast', 0.5, 6)],
[('Solarize', 0.3, 5), ('Equalize', 0.6, 5)],
[('TranslateY', 0.2, 4), ('Sharpness', 0.3, 3)],
[('Brightness', 0.0, 8), ('Color', 0.8, 8)]]
exp1_2 = [
[('Solarize', 0.2, 6), ('Color', 0.8, 6)],
[('Solarize', 0.2, 6), ('AutoContrast', 0.8, 1)],
[('Solarize', 0.4, 1), ('Equalize', 0.6, 5)],
[('Brightness', 0.0, 0), ('Solarize', 0.5, 2)],
[('AutoContrast', 0.9, 5), ('Brightness', 0.5, 3)]]
exp1_3 = [
[('Contrast', 0.7, 5), ('Brightness', 0.0, 2)],
[('Solarize', 0.2, 8), ('Solarize', 0.1, 5)],
[('Contrast', 0.5, 1), ('TranslateY', 0.2, 9)],
[('AutoContrast', 0.6, 5), ('TranslateY', 0.0, 9)],
[('AutoContrast', 0.9, 4), ('Equalize', 0.8, 4)]]
exp1_4 = [
[('Brightness', 0.0, 7), ('Equalize', 0.4, 7)],
[('Solarize', 0.2, 5), ('Equalize', 0.7, 5)],
[('Equalize', 0.6, 8), ('Color', 0.6, 2)],
[('Color', 0.3, 7), ('Color', 0.2, 4)],
[('AutoContrast', 0.5, 2), ('Solarize', 0.7, 2)]]
exp1_5 = [
[('AutoContrast', 0.2, 0), ('Equalize', 0.1, 0)],
[('ShearY', 0.6, 5), ('Equalize', 0.6, 5)],
[('Brightness', 0.9, 3), ('AutoContrast', 0.4, 1)],
[('Equalize', 0.8, 8), ('Equalize', 0.7, 7)],
[('Equalize', 0.7, 7), ('Solarize', 0.5, 0)]]
exp1_6 = [
[('Equalize', 0.8, 4), ('TranslateY', 0.8, 9)],
[('TranslateY', 0.8, 9), ('TranslateY', 0.6, 9)],
[('TranslateY', 0.9, 0), ('TranslateY', 0.5, 9)],
[('AutoContrast', 0.5, 3), ('Solarize', 0.3, 4)],
[('Solarize', 0.5, 3), ('Equalize', 0.4, 4)]]
exp2_0 = [
[('Color', 0.7, 7), ('TranslateX', 0.5, 8)],
[('Equalize', 0.3, 7), ('AutoContrast', 0.4, 8)],
[('TranslateY', 0.4, 3), ('Sharpness', 0.2, 6)],
[('Brightness', 0.9, 6), ('Color', 0.2, 8)],
[('Solarize', 0.5, 2), ('Invert', 0.0, 3)]]
exp2_1 = [
[('AutoContrast', 0.1, 5), ('Brightness', 0.0, 0)],
[('Cutout', 0.2, 4), ('Equalize', 0.1, 1)],
[('Equalize', 0.7, 7), ('AutoContrast', 0.6, 4)],
[('Color', 0.1, 8), ('ShearY', 0.2, 3)],
[('ShearY', 0.4, 2), ('Rotate', 0.7, 0)]]
exp2_2 = [
[('ShearY', 0.1, 3), ('AutoContrast', 0.9, 5)],
[('TranslateY', 0.3, 6), ('Cutout', 0.3, 3)],
[('Equalize', 0.5, 0), ('Solarize', 0.6, 6)],
[('AutoContrast', 0.3, 5), ('Rotate', 0.2, 7)],
[('Equalize', 0.8, 2), ('Invert', 0.4, 0)]]
exp2_3 = [
[('Equalize', 0.9, 5), ('Color', 0.7, 0)],
[('Equalize', 0.1, 1), ('ShearY', 0.1, 3)],
[('AutoContrast', 0.7, 3), ('Equalize', 0.7, 0)],
[('Brightness', 0.5, 1), ('Contrast', 0.1, 7)],
[('Contrast', 0.1, 4), ('Solarize', 0.6, 5)]]
exp2_4 = [
[('Solarize', 0.2, 3), ('ShearX', 0.0, 0)],
[('TranslateX', 0.3, 0), ('TranslateX', 0.6, 0)],
[('Equalize', 0.5, 9), ('TranslateY', 0.6, 7)],
[('ShearX', 0.1, 0), ('Sharpness', 0.5, 1)],
[('Equalize', 0.8, 6), ('Invert', 0.3, 6)]]
exp2_5 = [
[('AutoContrast', 0.3, 9), ('Cutout', 0.5, 3)],
[('ShearX', 0.4, 4), ('AutoContrast', 0.9, 2)],
[('ShearX', 0.0, 3), ('Posterize', 0.0, 3)],
[('Solarize', 0.4, 3), ('Color', 0.2, 4)],
[('Equalize', 0.1, 4), ('Equalize', 0.7, 6)]]
exp2_6 = [
[('Equalize', 0.3, 8), ('AutoContrast', 0.4, 3)],
[('Solarize', 0.6, 4), ('AutoContrast', 0.7, 6)],
[('AutoContrast', 0.2, 9), ('Brightness', 0.4, 8)],
[('Equalize', 0.1, 0), ('Equalize', 0.0, 6)],
[('Equalize', 0.8, 4), ('Equalize', 0.0, 4)]]
exp2_7 = [
[('Equalize', 0.5, 5), ('AutoContrast', 0.1, 2)],
[('Solarize', 0.5, 5), ('AutoContrast', 0.9, 5)],
[('AutoContrast', 0.6, 1), ('AutoContrast', 0.7, 8)],
[('Equalize', 0.2, 0), ('AutoContrast', 0.1, 2)],
[('Equalize', 0.6, 9), ('Equalize', 0.4, 4)]]
exp0s = exp0_0 + exp0_1 + exp0_2 + exp0_3
exp1s = exp1_0 + exp1_1 + exp1_2 + exp1_3 + exp1_4 + exp1_5 + exp1_6
exp2s = exp2_0 + exp2_1 + exp2_2 + exp2_3 + exp2_4 + exp2_5 + exp2_6 + exp2_7
return exp0s + exp1s + exp2s
def policy_svhn() -> List[List[Tuple[str, float, int]]]:
"""Returns the AutoAugment policies found on SVHN.
A policy is composed of two augmentations applied sequentially to the image.
Each augmentation is described as a tuple where the first element is the
type of transformation to apply, the second is the probability with which the
augmentation should be applied, and the third element is the strength of the
transformation.
"""
return [[('ShearX', 0.9, 4), ('Invert', 0.2, 3)],
[('ShearY', 0.9, 8), ('Invert', 0.7, 5)],
[('Equalize', 0.6, 5), ('Solarize', 0.6, 6)],
[('Invert', 0.9, 3), ('Equalize', 0.6, 3)],
[('Equalize', 0.6, 1), ('Rotate', 0.9, 3)],
[('ShearX', 0.9, 4), ('AutoContrast', 0.8, 3)],
[('ShearY', 0.9, 8), ('Invert', 0.4, 5)],
[('ShearY', 0.9, 5), ('Solarize', 0.2, 6)],
[('Invert', 0.9, 6), ('AutoContrast', 0.8, 1)],
[('Equalize', 0.6, 3), ('Rotate', 0.9, 3)],
[('ShearX', 0.9, 4), ('Solarize', 0.3, 3)],
[('ShearY', 0.8, 8), ('Invert', 0.7, 4)],
[('Equalize', 0.9, 5), ('TranslateY', 0.6, 6)],
[('Invert', 0.9, 4), ('Equalize', 0.6, 7)],
[('Contrast', 0.3, 3), ('Rotate', 0.8, 4)],
[('Invert', 0.8, 5), ('TranslateY', 0.0, 2)],
[('ShearY', 0.7, 6), ('Solarize', 0.4, 8)],
[('Invert', 0.6, 4), ('Rotate', 0.8, 4)],
[('ShearY', 0.3, 7), ('TranslateX', 0.9, 3)],
[('ShearX', 0.1, 6), ('Invert', 0.6, 5)],
[('Solarize', 0.7, 2), ('TranslateY', 0.6, 7)],
[('ShearY', 0.8, 4), ('Invert', 0.8, 8)],
[('ShearX', 0.7, 9), ('TranslateY', 0.8, 3)],
[('ShearY', 0.8, 5), ('AutoContrast', 0.7, 3)],
[('ShearX', 0.7, 2), ('Invert', 0.1, 5)]]
|
minpy/dispatch/rule.py | yuhonghong66/minpy | 1,271 | 11074311 | <gh_stars>1000+
"""Rules for rule based policy"""
from __future__ import absolute_import
from __future__ import print_function
import os
import atexit
import yaml
import numpy
from minpy.array_variants import ArrayType
from minpy.array import Array
from minpy.array import Number
from minpy.utils import log
_logger = log.get_logger(__name__) # pylint: disable=invalid-name
# TODO: integrate this part into normal routine when MXNet fixes exception in
# Python.
# Currently MXNet doesn't propagate exceptions raised in mshadow to Python.
# Avoid them by specifying a handcrafted whitelist.
MXNET_SUPPORT_TYPES = {'float', 'float16', 'float32'}
MXNET_TYPE_COMPATIBLE_OPS = {
'negative', 'add', 'subtract', 'multiply', 'divide', 'true_divide', 'mod',
'power'
}
# These are MXNet ops that introduces potential issues for further computation.
MXNET_BLACKLIST_OPS = {'array'}
class RuleError(ValueError):
"""Error in rule processing"""
pass
class Rules(object):
"""Rules interface.
    Different rule instances share the same class-level state and behave like a singleton.
Parameters
----------
loc : str
Path to rule configuration file.
save_config_atexit : bool
True will save config after the program exits.
"""
_rules = None
_hash = None
_env_var = '$MINPY_CONF'
_conf_file = '.minpy_rules.conf'
_loc = None
_save_config_atexit = False
def __init__(self, loc=None, save_config_atexit=False):
self.__class__._loc = loc # pylint: disable=protected-access
if save_config_atexit and not self._save_config_atexit:
self.__class__._save_config_atexit = True # pylint: disable=protected-access
atexit.register(self.save_rules_config)
self.load_rules_config()
@classmethod
def _build_hash(cls):
"""Clear hash and rebuild hash by rules"""
raise NotImplementedError()
@classmethod
def load_rules_config(cls, force=False):
"""Load rules configuration from configs and build hash.
        Find the rule configuration in the current directory, the self._env_var
        location, and the user's home directory, in that order. Then load the
        config into the corresponding class variable.
Load empty rules if loading fails.
Parameters
----------
force : bool
if True, force to load configuration.
"""
# TODO: add package data through installation
# http://peak.telecommunity.com/DevCenter/setuptools#non-package-data-files
if cls._rules is None or force:
config = None
locs = [
os.curdir,
os.path.expandvars(cls._env_var),
os.path.expanduser('~'),
]
locs = [os.path.join(loc, cls._conf_file) for loc in locs]
if cls._loc is not None:
locs.insert(0, cls._loc)
for filename in locs:
try:
with open(filename) as config_file:
config = yaml.safe_load(config_file)
break
except IOError:
pass
except yaml.YAMLError:
_logger.warning('Find corrupted configuration at %s',
filename)
if config is None:
_logger.error(
"Cannot find MinPy's rule configuration %s at %s.",
cls._conf_file, locs)
config = {}
else:
_logger.info('Load and use rule configuration at %s', filename) # pylint: disable=undefined-loop-variable
cls._rules = config
cls._build_hash()
@property
def name(self):
"""Return name of the policy"""
return self.__class__.__name__
@classmethod
def save_rules_config(cls):
        '''Save the current rules configuration to file.
        The target path is cls._loc if set, otherwise the cls._env_var location,
        otherwise the user's home directory.
        '''
loc = cls._loc
if loc is None:
loc = os.environ.get(cls._env_var)
if loc is None:
loc = os.path.expanduser('~')
loc = os.path.join(loc, cls._conf_file)
with open(loc, 'w+') as config_file:
yaml.safe_dump(cls._rules, config_file, default_flow_style=False)
_logger.info('Rule %s saved to %s.', cls.__name__, loc)
@classmethod
def reset_rules(cls):
"""Reset rules.
Delete all current rules. Also clear hash.
"""
cls._rules = {}
cls._hash = {}
def allow(self, name, nspace, impl_type, args, kwargs):
"""Rule inquiry interface.
Check if implementation is allowed.
Parameters
----------
name : str
The dispatch name.
nspace : str
The namespace the dispatch name belongs to.
impl_type : ArrayType
The type of implementation.
args : list
The positional arguments passed to the primitive.
kwargs : dict
The keyword arguments passed to the primitive.
Returns
-------
bool
            True if implementation is allowed; False otherwise.
"""
# pylint: disable=too-many-arguments
raise NotImplementedError()
def add(self, name, nspace, impl_type, args, kwargs):
"""Rule registration interface.
Register a new rule based on given info.
Parameters
----------
name : str
The dispatch name.
nspace : str
The namespace of the dispatch name.
impl_type : ArrayType
The type of implementation.
args : list
The positional arguments passed to the primitive.
kwargs : dict
The keyword arguments passed to the primitive.
"""
# pylint: disable=too-many-arguments
raise NotImplementedError()
class Blacklist(Rules):
"""Blacklist rules for rule-based policy"""
def allow(self, name, nspace, impl_type, args, kwargs):
# pylint: disable=too-many-arguments
if impl_type != ArrayType.MXNET:
return True
# For simplicity, this applies to all namespaces.
if name in MXNET_BLACKLIST_OPS:
_logger.debug(
'Rule applies: %s is in internal MXNet op blacklist.', name)
return False
def _is_supported_array_type(var):
if isinstance(var, Array):
# TODO: simplify here when MXNet, NumPy .dtype behavior become
# consistent
return numpy.dtype(var.dtype).name in MXNET_SUPPORT_TYPES
else:
return True
if nspace in self._hash and name in self._hash[nspace] and (
self._hash[nspace][name] is None or self._get_arg_rule_key(
args, kwargs) in self._hash[nspace][name]):
_logger.debug('Rule applies: block by auto-generated rule on %s.',
name)
return False
if name in MXNET_TYPE_COMPATIBLE_OPS:
return True
if not all(_is_supported_array_type(x) for x in args):
_logger.debug(
'Rule applies: contain unsupported type for MXNet op.')
return False
return True
def add(self, name, nspace, impl_type, args, kwargs):
# pylint: disable=too-many-arguments
if impl_type != ArrayType.MXNET:
raise RuleError('This rule only blacklists MXNet ops.')
# Return type sequence
type_seq = lambda args: [self._get_type_signiture(x) for x in args]
self._rules.setdefault(nspace, {})
self._rules[nspace].setdefault(name, [])
self._hash.setdefault(nspace, {})
self._hash[nspace].setdefault(name, set())
if self._get_arg_rule_key(args,
kwargs) not in self._hash[nspace][name]:
entry = {'args': type_seq(args)}
if len(kwargs) > 0:
entry['kwargs'] = list(kwargs.keys())
self._rules[nspace][name].append(entry)
key = self._get_arg_rule_key(args, kwargs)
self._hash[nspace][name].add(key)
_logger.info('New rule %s added.', key)
@classmethod
def _build_hash(cls):
cls._hash = {}
for nspace, ns_rules in cls._rules.items():
cls._hash[nspace] = {}
for key, rules in ns_rules.items():
cls._hash[nspace][key] = set()
for elm in rules:
cls._hash[nspace][key].add('-'.join(elm[
'args']) + '+' + '-'.join(
sorted(elm.get('kwargs', []))))
@staticmethod
def _get_type_signiture(var):
if isinstance(var, Array):
return 'array_dim' + str(var.ndim)
elif isinstance(var, Number):
return type(var.val).__name__
else:
return type(var).__name__
def _get_arg_rule_key(self, args, kwargs):
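        # Key format: positional-arg type names joined by '-', then '+', then the
        # sorted keyword-argument names joined by '-', e.g. 'array_dim2-int+axis'.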
arg_key = [self._get_type_signiture(x) for x in args]
kwarg_key = sorted(kwargs.keys())
return '-'.join(arg_key) + '+' + '-'.join(kwarg_key)
@classmethod
def query(cls, nspace, name):
"""Query the content of the rule by primitive name.
Parameters
----------
nspace
The namespace of the given primitive.
name : str
Name of the primitive for query
Returns
-------
str
Return the rule content of primitive name.
"""
ns_path = nspace.__name__
if not hasattr(nspace, name):
return '{} has no attribute {}.'.format(ns_path, name)
if ns_path not in cls._rules or name not in cls._rules[ns_path]:
return 'No rule for {} is found in {}.'.format(name, ns_path)
else:
from tabulate import tabulate
rule_list = cls._rules[ns_path][name]
rows = [(i + 1, ', '.join(rule.get('args', [])),
', '.join(rule.get('kwargs', [])))
for i, rule in enumerate(rule_list)]
table = tabulate(
rows,
headers=('No.', 'Type of Positional Args', 'Keyword Args'),
tablefmt='grid')
return 'Total: {} blacklist rules for primitive [{}]:\n'.format(
len(rule_list), name) + table
|
mmdeploy/codebase/mmpose/models/backbones/litehrnet.py | xizi/mmdeploy | 746 | 11074317 | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn.functional as F
from mmdeploy.core import FUNCTION_REWRITER
@FUNCTION_REWRITER.register_rewriter(
'mmpose.models.backbones.litehrnet.CrossResolutionWeighting.forward')
def cross_resolution_weighting__forward(ctx, self, x):
"""Rewrite ``forward`` for default backend.
    Rewrite this function to support exporting ``adaptive_avg_pool2d``.
Args:
x (list): block input.
"""
mini_size = [int(_) for _ in x[-1].shape[-2:]]
out = [F.adaptive_avg_pool2d(s, mini_size) for s in x[:-1]] + [x[-1]]
out = torch.cat(out, dim=1)
out = self.conv1(out)
out = self.conv2(out)
out = torch.split(out, self.channels, dim=1)
out = [
s * F.interpolate(a, size=s.size()[-2:], mode='nearest')
for s, a in zip(x, out)
]
return out
|
examples/visualization/eeg_on_scalp.py | stevemats/mne-python | 1,953 | 11074343 | <gh_stars>1000+
"""
.. _ex-eeg-on-scalp:
=================================
Plotting EEG sensors on the scalp
=================================
In this example, digitized EEG sensor locations are shown on the scalp.
"""
# Author: <NAME> <<EMAIL>>
#
# License: BSD-3-Clause
# %%
import mne
from mne.viz import plot_alignment, set_3d_view
print(__doc__)
data_path = mne.datasets.sample.data_path()
subjects_dir = data_path + '/subjects'
trans = mne.read_trans(data_path + '/MEG/sample/sample_audvis_raw-trans.fif')
raw = mne.io.read_raw_fif(data_path + '/MEG/sample/sample_audvis_raw.fif')
# Plot electrode locations on scalp
fig = plot_alignment(raw.info, trans, subject='sample', dig=False,
eeg=['original', 'projected'], meg=[],
coord_frame='head', subjects_dir=subjects_dir)
# Set viewing angle
set_3d_view(figure=fig, azimuth=135, elevation=80)
|
Algo and DSA/LeetCode-Solutions-master/Python/remove-interval.py | Sourav692/FAANG-Interview-Preparation | 3,269 | 11074347 | # Time: O(n)
# Space: O(1)
class Solution(object):
def removeInterval(self, intervals, toBeRemoved):
"""
:type intervals: List[List[int]]
:type toBeRemoved: List[int]
:rtype: List[List[int]]
"""
A, B = toBeRemoved
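        # For each interval [a, b], keep the piece left of the removed range
        # ([a, min(A, b)]) and the piece right of it ([max(a, B), b]), dropping
        # any piece that collapses (x >= y).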
return [[x, y] for a, b in intervals
for x, y in ((a, min(A, b)), (max(a, B), b))
if x < y]
|
dash_leaflet/express.py | RenaudLN/dash-leaflet | 125 | 11074349 | <gh_stars>100-1000
import geobuf
import dash_leaflet as dl
import base64
def categorical_colorbar(*args, categories, colorscale, **kwargs):
indices = list(range(len(categories) + 1))
return dl.Colorbar(*args, min=0, max=len(categories), classes=indices, colorscale=colorscale, tooltip=False,
tickValues=[item + 0.5 for item in indices[:-1]], tickText=categories, **kwargs)
def dicts_to_geojson(dicts, lat="lat", lon="lon"):
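    # Convert a list of dicts with lat/lon keys into a GeoJSON FeatureCollection
    # of Point features; any remaining keys become feature properties.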
geojson = {"type": "FeatureCollection", "features": []}
for d in dicts:
feature = {"type": "Feature", "geometry": {"type": "Point", "coordinates": [d[lon], d[lat]]}}
props = [key for key in d.keys() if key not in [lat, lon]]
if props:
feature["properties"] = {prop: d[prop] for prop in props}
geojson["features"].append(feature)
return geojson
def geojson_to_geobuf(geojson):
return base64.b64encode(geobuf.encode(geojson)).decode()
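# A minimal usage sketch (hypothetical data; assumes dl.GeoJSON is given the
# geobuf string with format="geobuf"):
#
#   markers = [{"lat": 56.0, "lon": 10.0, "popup": "A"},
#              {"lat": 55.7, "lon": 12.6, "popup": "B"}]
#   geojson = dicts_to_geojson(markers)
#   geobuf_data = geojson_to_geobuf(geojson)
#   layer = dl.GeoJSON(data=geobuf_data, format="geobuf")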
|
testcases/elichika_tests/node/BatchNorm.py | vermashresth/chainer-compiler | 116 | 11074370 | # coding: utf-8
import numpy as np
from chainer_compiler.elichika import testtools
import chainer
import chainer.links as L
class A(chainer.Chain):
def __init__(self):
super(A, self).__init__()
with self.init_scope():
self.l1 = L.BatchNormalization(3)
def forward(self, x):
r = self.l1(x)
return r
# ======================================from MLP
def main():
import numpy as np
np.random.seed(314)
model = A()
v = np.random.rand(2, 3, 5, 5).astype(np.float32)
testtools.generate_testcase(model, [v])
if __name__ == '__main__':
main()
|
python/paddle/fluid/tests/unittests/test_network_with_dtype.py | zmxdream/Paddle | 17,085 | 11074381 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.executor import Executor
BATCH_SIZE = 20
class TestNetWithDtype(unittest.TestCase):
def setUp(self):
self.dtype = "float64"
self.init_dtype()
def run_net_on_place(self, place):
main = fluid.Program()
startup = fluid.Program()
with fluid.program_guard(main, startup):
x = fluid.layers.data(name='x', shape=[13], dtype=self.dtype)
y = fluid.layers.data(name='y', shape=[1], dtype=self.dtype)
y_predict = fluid.layers.fc(input=x, size=1, act=None)
cost = fluid.layers.square_error_cost(input=y_predict, label=y)
avg_cost = fluid.layers.mean(cost)
sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
sgd_optimizer.minimize(avg_cost)
fetch_list = [avg_cost]
train_reader = paddle.batch(
paddle.dataset.uci_housing.train(), batch_size=BATCH_SIZE)
feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
exe = fluid.Executor(place)
exe.run(startup)
for data in train_reader():
exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)
            # the main program is runnable, the datatype is fully supported
break
def init_dtype(self):
pass
def test_cpu(self):
place = fluid.CPUPlace()
self.run_net_on_place(place)
def test_gpu(self):
if not core.is_compiled_with_cuda():
return
place = fluid.CUDAPlace(0)
self.run_net_on_place(place)
# TODO(dzhwinter): make sure the fp16 is runnable
# class TestFloat16(TestNetWithDtype):
# def init_dtype(self):
# self.dtype = "float16"
if __name__ == '__main__':
unittest.main()
|
estimator/iris_classifier.py | manailin/tensorflow___examples | 156 | 11074402 | <filename>estimator/iris_classifier.py
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import urllib
import tensorflow as tf
# Data sets
IRIS_TRAINING = 'iris_training.csv'
IRIS_TRAINING_URL = 'http://download.tensorflow.org/data/iris_training.csv'
IRIS_TEST = 'iris_test.csv'
IRIS_TEST_URL = 'http://download.tensorflow.org/data/iris_test.csv'
FEATURE_KEYS = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']
def maybe_download_iris_data(file_name, download_url):
"""Downloads the file and returns the number of data."""
if not os.path.exists(file_name):
raw = urllib.urlopen(download_url).read()
with open(file_name, 'w') as f:
f.write(raw)
    # The first line is a comma-separated header whose first value is the total
    # number of examples in the file.
with open(file_name, 'r') as f:
first_line = f.readline()
num_elements = first_line.split(',')[0]
return int(num_elements)
def input_fn(file_name, num_data, batch_size, is_training):
"""Creates an input_fn required by Estimator train/evaluate."""
    # The data file is assumed to have been downloaded already
    # (see maybe_download_iris_data).
def _parse_csv(rows_string_tensor):
"""Takes the string input tensor and returns tuple of (features, labels)."""
# Last dim is the label.
num_features = len(FEATURE_KEYS)
num_columns = num_features + 1
columns = tf.decode_csv(rows_string_tensor,
record_defaults=[[]] * num_columns)
features = dict(zip(FEATURE_KEYS, columns[:num_features]))
labels = tf.cast(columns[num_features], tf.int32)
return features, labels
def _input_fn():
"""The input_fn."""
dataset = tf.data.TextLineDataset([file_name])
# Skip the first line (which does not have data).
dataset = dataset.skip(1)
dataset = dataset.map(_parse_csv)
if is_training:
# For this small dataset, which can fit into memory, to achieve true
# randomness, the shuffle buffer size is set as the total number of
# elements in the dataset.
dataset = dataset.shuffle(num_data)
dataset = dataset.repeat()
dataset = dataset.batch(batch_size)
iterator = dataset.make_one_shot_iterator()
features, labels = iterator.get_next()
return features, labels
return _input_fn
def main(unused_argv):
tf.logging.set_verbosity(tf.logging.INFO)
num_training_data = maybe_download_iris_data(
IRIS_TRAINING, IRIS_TRAINING_URL)
num_test_data = maybe_download_iris_data(IRIS_TEST, IRIS_TEST_URL)
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = [
tf.feature_column.numeric_column(key, shape=1) for key in FEATURE_KEYS]
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)
# Train.
train_input_fn = input_fn(IRIS_TRAINING, num_training_data, batch_size=32,
is_training=True)
classifier.train(input_fn=train_input_fn, steps=400)
# Eval.
test_input_fn = input_fn(IRIS_TEST, num_test_data, batch_size=32,
is_training=False)
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
# Export the SavedModel file
import shutil
import tempfile
#savemodel_dir = classifier.export_savedmodel(tempfile.mkdtemp(), serving_input_fn = serving_input_fn, as_text = True)
from tensorflow.python.estimator.export import export
#feature_spec = {'MY_FEATURE': tf.constant(2.0, shape=[1, 1])}
# FEATURE_KEYS = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']
feature_spec = {'sepal_length': tf.constant(2.0, shape=[1, 1]), 'sepal_width': tf.constant(2.0, shape=[1, 1]), 'petal_length': tf.constant(2.0, shape=[1, 1]), 'petal_width': tf.constant(2.0, shape=[1, 1])}
serving_input_fn = export.build_raw_serving_input_receiver_fn(feature_spec)
savemodel_dir = classifier.export_savedmodel(tempfile.mkdtemp(), serving_input_fn, as_text = True)
savemodel_dir = savemodel_dir.decode("UTF-8")
name = "1"
if(os.path.isdir("savedmodel/" + name)):
shutil.rmtree("savedmodel/" + name)
shutil.move(savemodel_dir, "savedmodel/" + name)
if __name__ == '__main__':
tf.app.run()
|
aries_cloudagent/multitenant/tests/test_manager.py | SNU-Blockchain-2021-Fall-Group-H/aries-cloudagent-python | 247 | 11074404 | <gh_stars>100-1000
from asynctest import TestCase as AsyncTestCase
from asynctest import mock as async_mock
from ...core.in_memory import InMemoryProfile
from ...messaging.responder import BaseResponder
from ...wallet.models.wallet_record import WalletRecord
from ..manager import MultitenantManager
class TestMultitenantManager(AsyncTestCase):
async def setUp(self):
self.profile = InMemoryProfile.test_profile()
self.context = self.profile.context
self.responder = async_mock.CoroutineMock(send=async_mock.CoroutineMock())
self.context.injector.bind_instance(BaseResponder, self.responder)
self.manager = MultitenantManager(self.profile)
async def test_get_wallet_profile_returns_from_cache(self):
wallet_record = WalletRecord(wallet_id="test")
self.manager._instances["test"] = InMemoryProfile.test_profile()
with async_mock.patch(
"aries_cloudagent.config.wallet.wallet_config"
) as wallet_config:
profile = await self.manager.get_wallet_profile(
self.profile.context, wallet_record
)
assert profile is self.manager._instances["test"]
wallet_config.assert_not_called()
async def test_get_wallet_profile_not_in_cache(self):
wallet_record = WalletRecord(wallet_id="test", settings={})
self.manager._instances["test"] = InMemoryProfile.test_profile()
self.profile.context.update_settings(
{"admin.webhook_urls": ["http://localhost:8020"]}
)
with async_mock.patch(
"aries_cloudagent.config.wallet.wallet_config"
) as wallet_config:
profile = await self.manager.get_wallet_profile(
self.profile.context, wallet_record
)
assert profile is self.manager._instances["test"]
wallet_config.assert_not_called()
async def test_get_wallet_profile_settings(self):
extra_settings = {"extra_settings": "extra_settings"}
all_wallet_record_settings = [
{
"wallet_record_settings": "wallet_record_settings",
"wallet.dispatch_type": "default",
},
{
"wallet_record_settings": "wallet_record_settings",
"wallet.dispatch_type": "default",
"wallet.webhook_urls": ["https://localhost:8090"],
},
{
"wallet_record_settings": "wallet_record_settings",
"wallet.dispatch_type": "both",
},
{
"wallet_record_settings": "wallet_record_settings",
"wallet.dispatch_type": "both",
"wallet.webhook_urls": ["https://localhost:8090"],
},
]
def side_effect(context, provision):
return (InMemoryProfile(context=context), None)
for idx, wallet_record_settings in enumerate(all_wallet_record_settings):
wallet_record = WalletRecord(
wallet_id=f"test.{idx}",
settings=wallet_record_settings,
)
with async_mock.patch(
"aries_cloudagent.multitenant.manager.wallet_config"
) as wallet_config:
wallet_config.side_effect = side_effect
profile = await self.manager.get_wallet_profile(
self.profile.context, wallet_record, extra_settings
)
assert (
profile.settings.get("wallet_record_settings")
== "wallet_record_settings"
)
assert profile.settings.get("extra_settings") == "extra_settings"
async def test_get_wallet_profile_settings_reset(self):
wallet_record = WalletRecord(
wallet_id="test",
settings={},
)
with async_mock.patch(
"aries_cloudagent.multitenant.manager.wallet_config"
) as wallet_config:
def side_effect(context, provision):
return (InMemoryProfile(context=context), None)
wallet_config.side_effect = side_effect
self.profile.context.update_settings(
{
"wallet.recreate": True,
"wallet.seed": "test_seed",
"wallet.name": "test_name",
"wallet.type": "test_type",
"wallet.rekey": "test_rekey",
"mediation.open": True,
"mediation.invite": "http://invite.com",
"mediation.default_id": "24a96ef5",
"mediation.clear": True,
}
)
profile = await self.manager.get_wallet_profile(
self.profile.context, wallet_record
)
assert profile.settings.get("wallet.recreate") == False
assert profile.settings.get("wallet.seed") == None
assert profile.settings.get("wallet.rekey") == None
assert profile.settings.get("wallet.name") == None
assert profile.settings.get("wallet.type") == None
assert profile.settings.get("mediation.open") == None
assert profile.settings.get("mediation.invite") == None
assert profile.settings.get("mediation.default_id") == None
assert profile.settings.get("mediation.clear") == None
async def test_get_wallet_profile_settings_reset_overwrite(self):
wallet_record = WalletRecord(
wallet_id="test",
settings={
"wallet.recreate": True,
"wallet.seed": "test_seed",
"wallet.name": "test_name",
"wallet.type": "test_type",
"wallet.rekey": "test_rekey",
"mediation.open": True,
"mediation.invite": "http://invite.com",
"mediation.default_id": "24a96ef5",
"mediation.clear": True,
},
)
with async_mock.patch(
"aries_cloudagent.multitenant.manager.wallet_config"
) as wallet_config:
def side_effect(context, provision):
return (InMemoryProfile(context=context), None)
wallet_config.side_effect = side_effect
profile = await self.manager.get_wallet_profile(
self.profile.context, wallet_record
)
assert profile.settings.get("wallet.recreate") == True
assert profile.settings.get("wallet.seed") == "test_seed"
assert profile.settings.get("wallet.rekey") == "test_rekey"
assert profile.settings.get("wallet.name") == "test_name"
assert profile.settings.get("wallet.type") == "test_type"
assert profile.settings.get("mediation.open") == True
assert profile.settings.get("mediation.invite") == "http://invite.com"
assert profile.settings.get("mediation.default_id") == "24a96ef5"
assert profile.settings.get("mediation.clear") == True
|
samples/mgmt_driver/kubespray/kubespray_mgmt.py | h1r0mu/tacker | 116 | 11074416 | # Copyright (C) 2021 FUJITSU
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ipaddress
import json
import os
import time
import eventlet
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
import paramiko
import yaml
from tacker.common import cmd_executer
from tacker.common import exceptions
from tacker.common import log
from tacker.db.db_base import CommonDbMixin
from tacker.db.nfvo import nfvo_db
from tacker.extensions import nfvo
from tacker.nfvo.nfvo_plugin import NfvoPlugin
from tacker import objects
from tacker.vnfm.infra_drivers.openstack import heat_client as hc
from tacker.vnfm.mgmt_drivers import vnflcm_abstract_driver
from tacker.vnfm import vim_client
CHECK_POD_STATUS_RETRY_COUNT = 20
COMMAND_WAIT_COMPLETE_TIME = 0.2
COMMAND_WAIT_RETRY_TIME = 30
CONF = cfg.CONF
CONNECT_REMOTE_SERVER_RETRY_COUNT = 4
DRAIN_TIMEOUT = 300
K8S_CMD_TIMEOUT = 30
K8S_DEPLOY_TIMEOUT = 300
K8S_INSTALL_TIMEOUT = 2700
LOG = logging.getLogger(__name__)
NEXT_CHECK_INTERVAL_TIME = 15
ROLE_MASTER = 'master'
ROLE_WORKER = 'worker'
SERVER_WAIT_COMPLETE_TIME = 240
TOKEN_CREATE_WAIT_TIME = 30
UNINSTALL_NODE_TIMEOUT = 900
class KubesprayMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
def get_type(self):
return 'mgmt-drivers-kubespray'
def get_name(self):
return 'mgmt-drivers-kubespray'
def get_description(self):
return 'Tacker Kubespray VNFMgmt Driver'
@log.log
def instantiate_start(self, context, vnf_instance,
instantiate_vnf_request, grant,
grant_request, **kwargs):
pass
def _get_vim(self, context, vim_connection_info):
vim_client_obj = vim_client.VimClient()
if vim_connection_info:
vim_id = vim_connection_info[0].vim_id
access_info = vim_connection_info[0].access_info
if access_info:
region_name = access_info.get('region')
else:
region_name = None
else:
vim_id = None
region_name = None
try:
vim_res = vim_client_obj.get_vim(
context, vim_id, region_name=region_name)
except nfvo.VimNotFoundException:
raise exceptions.VimConnectionNotFound(vim_id=vim_id)
vim_res['vim_auth'].update({'region': region_name})
vim_info = {'id': vim_res['vim_id'], 'vim_id': vim_res['vim_id'],
'vim_type': vim_res['vim_type'],
'access_info': vim_res['vim_auth']}
return vim_info
def _get_vim_connection_info(self, context, instantiate_vnf_req):
vim_info = self._get_vim(
context, instantiate_vnf_req.vim_connection_info)
vim_connection_info = objects.VimConnectionInfo.obj_from_primitive(
vim_info, context)
return vim_connection_info
def _check_is_cidr(self, key, value, cidr_str):
# instantiate: check cidr
try:
ipaddress.ip_network(cidr_str)
except ValueError:
LOG.error('The {value} of {key} in the '
'additionalParams is invalid.'.format(
value=value, key=key))
raise exceptions.MgmtDriverParamInvalid(param=value)
def _check_value_exist(self, attr_list, value, key):
for attr in attr_list:
if not value.get(attr):
LOG.error(
'The {} of {} in the '
'additionalParams cannot'
' be None.'.format(attr, key))
raise exceptions.MgmtDriverNotFound(
param=attr)
def _check_input_parameters(self, additional_param, vnf_package_path):
if not additional_param:
            LOG.error('The kubernetes cluster info cannot be None '
'in additionalParams.')
raise exceptions.MgmtDriverOtherError(
error_message="The kubernetes cluster info"
" cannot be None in additionalParams.")
if not isinstance(additional_param, dict):
LOG.error('The format of kubernetes cluster info in '
'additionalParams is invalid. It must be dict.')
raise exceptions.MgmtDriverOtherError(
error_message="The format of kubernetes cluster info in "
"additionalParams is invalid. It must be dict.")
for key, value in additional_param.items():
attr_list = []
if key not in ('proxy', 'external_lb_param', 'vim_name'):
attr_list.extend(['username', 'password'])
if key in ('master_node', 'worker_node', 'external_lb_param'):
attr_list.extend(['ssh_cp_name'])
if key == 'ansible':
attr_list.extend(['ip_address', 'kubespray_root_path',
'transferring_inventory_path'])
if key == 'external_lb_param':
attr_list.extend(['ssh_username', 'ssh_password',
'script_path'])
if value.get('script_path'):
abs_script_path = os.path.join(
vnf_package_path, value.get('script_path'))
if not os.path.exists(abs_script_path):
LOG.error('The path of external_lb_param'
' script is invalid.')
raise exceptions.MgmtDriverOtherError(
error_message="The path of external_lb_param"
" script is invalid")
if key in ('master_node', 'ansible'):
for attr in ['pod_cidr', 'cluster_cidr', 'ip_address']:
if value.get(attr):
self._check_is_cidr(
key, attr, value.get(attr))
if attr_list:
self._check_value_exist(attr_list, value, key)
def _get_ssh_ip_and_nic_ip(self, heatclient, stack_id, node):
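        # Resolve the node's SSH IP from its ssh_cp_name port (preferring a
        # floating IP over the first fixed IP) and its NIC IP from
        # nic_cp_name, falling back to the SSH IP when nic_cp_name is unset.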
resource_info = heatclient.resources.get(
stack_id=stack_id,
resource_name=node.get('ssh_cp_name'))
if resource_info.attributes.get('floating_ip_address'):
ssh_ip = resource_info.attributes.get('floating_ip_address')
else:
ssh_ip = resource_info.attributes.get(
'fixed_ips')[0].get('ip_address')
if not ssh_ip:
LOG.error("Failed to get the node's ssh ip.")
raise exceptions.MgmtDriverOtherError(
error_message="Failed to get"
" the node's ssh ip.")
if not node.get('nic_cp_name'):
nic_ip = ssh_ip
else:
nic_ip = heatclient.resources.get(
stack_id=stack_id,
resource_name=node.get('nic_cp_name')).attributes.get(
'fixed_ips')[0].get('ip_address')
if not nic_ip:
LOG.error("Failed to get the node's nic ip.")
raise exceptions.MgmtDriverOtherError(
error_message="Failed to get"
" the node's nic ip.")
return ssh_ip, nic_ip
def _get_group_resources_list(
self, heatclient, stack_id, node, additional_params):
# get group resources list
nest_resources_list = heatclient.resources.list(stack_id=stack_id)
group_stack_name = node.get("aspect_id")
group_stack_id = ""
for nest_resources in nest_resources_list:
if nest_resources.resource_name == group_stack_name:
group_stack_id = nest_resources.physical_resource_id
if not group_stack_id:
            LOG.error('No stack id matching the group {} was found.'.format(
                group_stack_name))
            raise exceptions.MgmtDriverOtherError(
                error_message='No stack id matching the group'
                              ' {} was found.'.format(group_stack_name))
group_resources_list = heatclient.resources.list(
stack_id=group_stack_id)
return group_resources_list
def _get_install_info_for_k8s_node(self, nest_stack_id, node,
additional_params, heatclient):
# instantiate: get k8s ssh ips
vm_dict_list = []
# get ssh_ip and nic_ip from heat, and set value into vm_dict
if not node.get('aspect_id'):
ssh_ip, nic_ip = self._get_ssh_ip_and_nic_ip(
heatclient, nest_stack_id, node)
vm_dict = {
"ssh_ip": ssh_ip,
"nic_ip": nic_ip
}
vm_dict_list.append(vm_dict)
else:
group_resources_list = self._get_group_resources_list(
heatclient, nest_stack_id, node, additional_params)
for group_resource in group_resources_list:
stack_id = group_resource.physical_resource_id
ssh_ip, nic_ip = self._get_ssh_ip_and_nic_ip(
heatclient, stack_id, node)
vm_dict = {
"ssh_ip": ssh_ip,
"nic_ip": nic_ip
}
vm_dict_list.append(vm_dict)
return vm_dict_list
def _set_lb_info(self, nest_stack_id, external_lb_param, master_node,
heatclient):
# get ssh_ip and cluster_ip from heat, and set value into vm_dict
ssh_ip, _ = self._get_ssh_ip_and_nic_ip(
heatclient, nest_stack_id, external_lb_param)
external_lb_param['pod_cidr'] = master_node.get('pod_cidr', '')
external_lb_param['cluster_cidr'] = master_node.get(
'cluster_cidr', '')
external_lb_param['ssh_ip'] = ssh_ip
external_lb_param['cluster_ip'] = ssh_ip
def _init_commander_and_set_script(self, user, password, host,
timeout, vnf_package_path=None,
script_path=None, token_flag=False):
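        # Optionally upload the external LB script and/or the
        # create_admin_token.yaml manifest to /tmp on the host over SFTP,
        # then return a RemoteCommandExecutor. Connection failures are
        # retried up to CONNECT_REMOTE_SERVER_RETRY_COUNT times, sleeping
        # SERVER_WAIT_COMPLETE_TIME seconds between attempts.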
retry = CONNECT_REMOTE_SERVER_RETRY_COUNT
while retry > 0:
try:
if (vnf_package_path and script_path) or token_flag:
connect = paramiko.Transport(host, 22)
connect.connect(username=user, password=password)
sftp = paramiko.SFTPClient.from_transport(connect)
if vnf_package_path and script_path:
sftp.put(os.path.join(vnf_package_path, script_path),
"/tmp/{}".format(
script_path.replace('Scripts', '')))
if token_flag:
fname = 'create_admin_token.yaml'
sftp.put(os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"../../../samples/mgmt_driver/{}".format(fname)),
"/tmp/{}".format(fname))
connect.close()
commander = cmd_executer.RemoteCommandExecutor(
user=user, password=password, host=host,
timeout=timeout)
return commander
except (exceptions.NotAuthorized, paramiko.SSHException,
paramiko.ssh_exception.NoValidConnectionsError) as e:
LOG.debug(e)
retry -= 1
if retry == 0:
LOG.error(e)
raise exceptions.MgmtDriverOtherError(error_message=e)
time.sleep(SERVER_WAIT_COMPLETE_TIME)
def _send_or_receive_file(self, host, user, password,
remote_file, local_file, operation):
connect = paramiko.Transport(host, 22)
connect.connect(username=user, password=password)
sftp = paramiko.SFTPClient.from_transport(connect)
if operation == 'receive':
sftp.get(remote_file, local_file)
else:
sftp.put(local_file, remote_file)
connect.close()
def _execute_command(self, commander, ssh_command, timeout, type, retry):
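        # Run ssh_command under an eventlet timeout, retrying `retry` times.
        # Result handling depends on `type`: 'common' raises when the return
        # code is non-zero and stderr is present, 'ansible' returns False when
        # the checked path is missing and raises if it already exists, and
        # 'install' retries once on Ansible privilege-escalation prompt
        # timeouts before raising.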
eventlet.monkey_patch()
while retry >= 0:
try:
with eventlet.Timeout(timeout, True):
result = commander.execute_command(
ssh_command)
break
except eventlet.timeout.Timeout:
                LOG.debug('Timed out while executing command: '
                          '{}.'.format(ssh_command))
                retry -= 1
                if retry < 0:
                    LOG.error('Timed out while executing command: '
                              '{}.'.format(ssh_command))
                    raise exceptions.MgmtDriverOtherError(
                        error_message='Timed out while executing command: '
                                      '{}.'.format(ssh_command))
time.sleep(COMMAND_WAIT_RETRY_TIME)
if type == 'common':
if result.get_return_code() != 0 and result.get_stderr():
err = result.get_stderr()
LOG.error(err)
raise exceptions.MgmtDriverRemoteCommandError(err_info=err)
elif type == 'ansible':
if result.get_return_code() != 0 \
and 'No such file or directory' in result.get_stderr()[0]:
return False
else:
                error_message = 'The transferring_inventory_path already' \
                                ' exists on the kubespray server. Please' \
                                ' check your path.'
LOG.error(error_message)
raise exceptions.MgmtDriverRemoteCommandError(
err_info=error_message)
elif type == 'install':
if result.get_return_code() != 0:
for error in result.get_stdout():
if 'Timeout (12s) waiting for ' \
'privilege escalation prompt' in error and \
retry > 0:
self._execute_command(commander, ssh_command,
timeout, 'install', 0)
break
else:
err = result.get_stderr()
LOG.error(err)
raise exceptions.MgmtDriverRemoteCommandError(
err_info=err)
return result.get_stdout()
def _create_hosts_yaml(self, master_node, master_vm_dict_list,
worker_node, worker_vm_dict_list):
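        # Build the Kubespray Ansible inventory (hosts.yaml) as a dict:
        # hostnames are derived from the last octet of each node's NIC IP,
        # masters go into kube_control_plane/etcd and workers into kube_node.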
hosts_yaml_content = {
'all': {
'hosts': {},
'children': {
'kube_control_plane': {'hosts': {}},
'kube_node': {'hosts': {}},
'etcd': {'hosts': {}},
'k8s_cluster': {
'children': {'kube_control_plane': None,
'kube_node': None}},
'calico_rr': {'hosts': {}}}}}
for master_vm in master_vm_dict_list:
key = 'master' + master_vm.get('nic_ip').split('.')[-1]
hosts_yaml_content['all']['hosts'][key] = {
'ansible_host': master_vm.get('ssh_ip'),
'ip': master_vm.get('nic_ip'),
'ansible_user': master_node.get('username'),
                'ansible_password': master_node.get('password'),
}
hosts_yaml_content['all']['children']['kube_control_plane'][
'hosts'][key] = None
hosts_yaml_content['all']['children']['etcd'][
'hosts'][key] = None
for worker_vm in worker_vm_dict_list:
key = 'worker' + worker_vm.get('nic_ip').split('.')[-1]
hosts_yaml_content['all']['hosts'][key] = {
'ansible_host': worker_vm.get('ssh_ip'),
'ip': worker_vm.get('nic_ip'),
'ansible_user': worker_node.get('username'),
                'ansible_password': worker_node.get('password'),
}
hosts_yaml_content['all']['children']['kube_node'][
'hosts'][key] = None
return hosts_yaml_content
def _install_k8s_cluster_and_set_config(
self, master_node, worker_node, proxy, ansible,
external_lb_param, master_vm_dict_list, worker_vm_dict_list):
"""Install Kubernetes Cluster Function
It will use Kubespray which is installed in advance to install
a Kubernetes Cluster.
        At present, Kubespray's version is v2.16.0. You can get detailed
information from the following url.
https://github.com/kubernetes-sigs/kubespray/tree/v2.16.0
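
        Overall flow: read the MTU from the first master node, generate
        hosts.yaml on the Ansible server, tune the Calico MTU, service/pod
        CIDRs and proxy settings in the inventory, run cluster.yml, and
        finally create an admin service-account token and return it.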
"""
# get mtu value
master_commander = self._init_commander_and_set_script(
master_node.get('username'), master_node.get('password'),
master_vm_dict_list[0].get('ssh_ip'), K8S_CMD_TIMEOUT)
ssh_command = "ip a | grep '%(nic_ip)s' -B 2 | " \
"grep 'mtu' | awk '{print $5}'" % \
{'nic_ip': master_vm_dict_list[0].get('nic_ip')}
mtu_value = self._execute_command(
master_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)[0].replace('\n', '')
calico_veth_mtu = int(mtu_value) - 20
master_commander.close_session()
# create inventory/hosts.yaml
ansible_commander = self._init_commander_and_set_script(
ansible.get('username'), ansible.get('password'),
ansible.get('ip_address'), K8S_CMD_TIMEOUT)
ssh_command = "ls -l {}".format(
ansible.get('transferring_inventory_path'))
file_exists_flag = self._execute_command(
ansible_commander, ssh_command, K8S_CMD_TIMEOUT, 'ansible', 0)
if not file_exists_flag:
ssh_command = 'cp -r {kubespray_root_path}/inventory/sample' \
' {transferring_inventory_path}'.format(
kubespray_root_path=ansible.get(
'kubespray_root_path'),
transferring_inventory_path=ansible.get(
'transferring_inventory_path'))
self._execute_command(
ansible_commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0)
hosts_yaml_content = self._create_hosts_yaml(
master_node, master_vm_dict_list,
worker_node, worker_vm_dict_list)
local_hosts_yaml_path = '/tmp/hosts.yaml'
with open(local_hosts_yaml_path, 'w', encoding='utf-8') as nf:
yaml.safe_dump(hosts_yaml_content, nf, default_flow_style=False)
remote_hosts_yaml_path = ansible.get(
'transferring_inventory_path') + '/hosts.yaml'
self._send_or_receive_file(
ansible.get('ip_address'), ansible.get('username'),
ansible.get('password'), remote_hosts_yaml_path,
local_hosts_yaml_path, 'send')
# set calico mtu value
calico_file_path = ansible.get(
'transferring_inventory_path') + \
'/group_vars/k8s_cluster/k8s-net-calico.yml'
ssh_command = 'sed -i "s/\\# calico_mtu: 1500/calico_mtu: ' \
'{mtu_value}/g" {calico_file_path}'.format(
mtu_value=mtu_value,
calico_file_path=calico_file_path)
self._execute_command(ansible_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
ssh_command = 'sed -i "s/\\# calico_veth_mtu: 1440/calico_veth_mtu:' \
' {calico_veth_mtu}/g" {calico_file_path}'.format(
calico_veth_mtu=calico_veth_mtu,
calico_file_path=calico_file_path)
self._execute_command(ansible_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
# set pod and service cidr information
if external_lb_param.get('cluster_cidr') and \
external_lb_param.get('pod_cidr'):
k8s_cluster_file_path = ansible.get(
'transferring_inventory_path') + \
'/group_vars/k8s_cluster/k8s-cluster.yml'
cluster_cidr = external_lb_param.get(
'cluster_cidr').replace('/', '\\/')
ssh_command = 'sed -i "s/kube_service_addresses:' \
' 10.233.0.0\\/18/' \
'kube_service_addresses: {k8s_service_address}/g"' \
' {k8s_cluster_file_path}'.format(
k8s_service_address=cluster_cidr,
k8s_cluster_file_path=k8s_cluster_file_path)
self._execute_command(ansible_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
pod_cidr = external_lb_param.get('pod_cidr').replace('/', '\\/')
ssh_command = 'sed -i "s/kube_pods_subnet: 10.233.64.0\\/18/' \
'kube_pods_subnet: {pod_cidr}/g"' \
' {k8s_cluster_file_path}'.format(
pod_cidr=pod_cidr,
k8s_cluster_file_path=k8s_cluster_file_path)
self._execute_command(ansible_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
# set proxy
if proxy:
proxy_file_path = ansible.get(
'transferring_inventory_path') + \
'/group_vars/all/all.yml'
http_proxy = proxy.get('http_proxy').replace('/', '\\/')
            https_proxy = proxy.get('https_proxy').replace('/', '\\/')
ssh_command = 'sed -i "s/\\# http_proxy: \\"\\"/' \
'http_proxy: {http_proxy}/g"' \
' {proxy_file_path}'.format(
http_proxy=http_proxy,
proxy_file_path=proxy_file_path)
self._execute_command(ansible_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
ssh_command = 'sed -i "s/\\# https_proxy: \\"\\"/' \
'https_proxy: {https_proxy}/g"' \
' {proxy_file_path}'.format(
https_proxy=https_proxy,
proxy_file_path=proxy_file_path)
self._execute_command(ansible_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
ansible_commander.close_session()
# install k8s cluster
install_timeout = K8S_INSTALL_TIMEOUT * (
len(master_vm_dict_list) + len(worker_vm_dict_list))
ansible_commander = self._init_commander_and_set_script(
ansible.get('username'), ansible.get('password'),
ansible.get('ip_address'), install_timeout)
cluster_yaml_path = ansible.get(
'kubespray_root_path') + '/cluster.yml'
ssh_command = 'ansible-playbook -i {}/hosts.yaml --become' \
' --become-user=root {}'.format(
ansible.get('transferring_inventory_path'),
cluster_yaml_path)
self._execute_command(ansible_commander, ssh_command,
install_timeout, 'install', 1)
ansible_commander.close_session()
# get k8s bearer token
master_commander = self._init_commander_and_set_script(
master_node.get('username'), master_node.get('password'),
master_vm_dict_list[0].get('ssh_ip'), K8S_CMD_TIMEOUT,
token_flag=True)
ssh_command = "sudo kubectl create -f /tmp/create_admin_token.yaml"
self._execute_command(
master_commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0)
time.sleep(TOKEN_CREATE_WAIT_TIME)
ssh_command = "sudo kubectl get secret -n kube-system " \
"| grep '^admin-token' " \
"| awk '{print $1}' " \
"| xargs -i sudo kubectl get secret {} -n kube-system" \
" -ojsonpath={.data.token} | base64 -d"
bearer_token = self._execute_command(
master_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)[0].replace('\n', '')
master_commander.close_session()
if os.path.exists(local_hosts_yaml_path):
os.remove(local_hosts_yaml_path)
return bearer_token
def _install_and_set_lb(self, external_lb_param, vnf_package_path, proxy,
master_vm_dict_list, worker_vm_dict_list,
master_node):
lb_commander = self._init_commander_and_set_script(
external_lb_param.get('ssh_username'),
external_lb_param.get('ssh_password'),
external_lb_param.get('ssh_ip'), K8S_DEPLOY_TIMEOUT,
vnf_package_path=vnf_package_path,
script_path=external_lb_param.get('script_path'))
master_ssh_ips_str = ','.join([vm_dict.get(
'nic_ip') for vm_dict in master_vm_dict_list])
worker_ssh_ips_str = ','.join([vm_dict.get(
'nic_ip') for vm_dict in worker_vm_dict_list])
if proxy.get('http_proxy') and proxy.get('https_proxy'):
ssh_command = \
"export http_proxy={http_proxy};" \
"export https_proxy={https_proxy};" \
"bash /tmp/{script_path} " \
"-m {master_ip} -w {worker_ip} ".format(
http_proxy=proxy.get('http_proxy'),
https_proxy=proxy.get('https_proxy'),
master_ip=master_ssh_ips_str,
worker_ip=worker_ssh_ips_str,
script_path=external_lb_param.get(
'script_path').replace('Scripts/', ''))
else:
ssh_command = \
"bash /tmp/{script_path} " \
"-m {master_ip} -w {worker_ip} ".format(
master_ip=master_ssh_ips_str,
worker_ip=worker_ssh_ips_str,
script_path=external_lb_param.get(
'script_path').replace('Scripts/', ''))
self._execute_command(
lb_commander, ssh_command, K8S_DEPLOY_TIMEOUT, 'common', 0)
lb_commander.close_session()
# copy k8s admin configuration file
master_commander = self._init_commander_and_set_script(
master_node.get('username'), master_node.get('password'),
master_vm_dict_list[0].get('ssh_ip'), K8S_CMD_TIMEOUT)
ssh_command = 'sudo cp /etc/kubernetes/admin.conf /tmp/config;' \
'sudo chown $(id -u):$(id -g) /tmp/config'
self._execute_command(master_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
ssh_command = "sed -i 's/:6443/:8383/' /tmp/config"
self._execute_command(master_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
master_commander.close_session()
remote_admin_file_path = local_admin_file_path = '/tmp/config'
self._send_or_receive_file(
master_vm_dict_list[0].get('ssh_ip'),
master_node.get('username'), master_node.get('password'),
remote_admin_file_path, local_admin_file_path, 'receive')
# send config file to lb server
lb_admin_file_path = '~/.kube/config'
if os.path.exists(local_admin_file_path):
self._send_or_receive_file(
external_lb_param.get('ssh_ip'),
external_lb_param.get('ssh_username'),
external_lb_param.get('ssh_password'),
remote_admin_file_path, local_admin_file_path, 'send')
lb_commander = self._init_commander_and_set_script(
external_lb_param.get('ssh_username'),
external_lb_param.get('ssh_password'),
external_lb_param.get('ssh_ip'), K8S_CMD_TIMEOUT)
ssh_command = "mv {} {}".format(remote_admin_file_path,
lb_admin_file_path)
self._execute_command(lb_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
lb_commander.close_session()
if os.path.exists(local_admin_file_path):
os.remove(local_admin_file_path)
def _create_vim(self, context, vnf_instance, external_lb_param,
bearer_token, vim_name):
server = 'https://' + external_lb_param.get('cluster_ip') + ':8383'
vim_info = {
'vim': {
'name': vim_name,
'auth_url': server,
'vim_project': {
'name': 'default'
},
'auth_cred': {
'bearer_token': bearer_token
},
'type': 'kubernetes',
'tenant_id': context.project_id
}
}
try:
nfvo_plugin = NfvoPlugin()
created_vim_info = nfvo_plugin.create_vim(context, vim_info)
except Exception as e:
LOG.error("Failed to register kubernetes vim: {}".format(e))
raise exceptions.MgmtDriverOtherError(
error_message="Failed to register kubernetes vim: {}".format(
e))
id = uuidutils.generate_uuid()
vim_id = created_vim_info.get('id')
vim_type = 'kubernetes'
access_info = {
'auth_url': server
}
vim_connection_info = objects.VimConnectionInfo(
id=id, vim_id=vim_id, vim_type=vim_type,
access_info=access_info, interface_info=None
)
vim_connection_infos = vnf_instance.vim_connection_info
vim_connection_infos.append(vim_connection_info)
vnf_instance.vim_connection_info = vim_connection_infos
vnf_instance.save()
def _get_vnf_package_path(self, context, vnfd_id):
return os.path.join(CONF.vnf_package.vnf_package_csar_path,
self._get_vnf_package_id(context, vnfd_id))
def _get_vnf_package_id(self, context, vnfd_id):
vnf_package = objects.VnfPackageVnfd.get_by_id(context, vnfd_id)
return vnf_package.package_uuid
@log.log
def instantiate_end(self, context, vnf_instance,
instantiate_vnf_request, grant,
grant_request, **kwargs):
# get vim_connect_info
if hasattr(instantiate_vnf_request, 'vim_connection_info'):
vim_connection_info = self._get_vim_connection_info(
context, instantiate_vnf_request)
else:
# In case of healing entire Kubernetes cluster, 'heal_end' method
# will call this method using 'vnf_instance.instantiated_vnf_info'
# as the 'instantiate_vnf_request', but there is no
# 'vim_connection_info' in it, so we should get
# 'vim_connection_info' from 'vnf_instance'.
vim_connection_info = self._get_vim_connection_info(
context, vnf_instance)
additional_param = instantiate_vnf_request.additional_params.get(
'k8s_cluster_installation_param', {})
vim_name = additional_param.get('vim_name')
master_node = additional_param.get('master_node', {})
worker_node = additional_param.get('worker_node', {})
proxy = additional_param.get('proxy', {})
ansible = additional_param.get('ansible', {})
external_lb_param = additional_param.get('external_lb_param', {})
vnf_package_path = self._get_vnf_package_path(
context, vnf_instance.vnfd_id)
self._check_input_parameters(additional_param, vnf_package_path)
nest_stack_id = vnf_instance.instantiated_vnf_info.instance_id
if not vim_name:
vim_name = 'kubernetes_vim_' + vnf_instance.id
# get k8s node vm list
access_info = vim_connection_info.access_info
heatclient = hc.HeatClient(access_info)
master_vm_dict_list = \
self._get_install_info_for_k8s_node(
nest_stack_id, master_node,
instantiate_vnf_request.additional_params,
heatclient)
worker_vm_dict_list = \
self._get_install_info_for_k8s_node(
nest_stack_id, worker_node,
instantiate_vnf_request.additional_params, heatclient)
# set LB vm's info
self._set_lb_info(nest_stack_id, external_lb_param, master_node,
heatclient)
# install k8s_cluster and set config
bearer_token = self._install_k8s_cluster_and_set_config(
master_node, worker_node, proxy, ansible, external_lb_param,
master_vm_dict_list, worker_vm_dict_list)
# Install and set ExternalLB
self._install_and_set_lb(external_lb_param, vnf_package_path, proxy,
master_vm_dict_list, worker_vm_dict_list,
master_node)
# create vim
self._create_vim(context, vnf_instance, external_lb_param,
bearer_token, vim_name)
@log.log
def terminate_start(self, context, vnf_instance,
terminate_vnf_request, grant,
grant_request, **kwargs):
pass
def _get_vim_by_name(self, context, k8s_vim_name):
common_db_api = CommonDbMixin()
result = common_db_api.get_by_name(
context, nfvo_db.Vim, k8s_vim_name)
if not result:
LOG.debug("Cannot find kubernetes "
"vim with name: {}".format(k8s_vim_name))
return result
@log.log
def terminate_end(self, context, vnf_instance,
terminate_vnf_request, grant,
grant_request, **kwargs):
# delete kubernetes vim
k8s_params = vnf_instance.instantiated_vnf_info.additional_params.get(
'k8s_cluster_installation_param', {})
k8s_vim_name = k8s_params.get('vim_name')
if not k8s_vim_name:
k8s_vim_name = 'kubernetes_vim_' + vnf_instance.id
vim_info = self._get_vim_by_name(
context, k8s_vim_name)
if vim_info:
nfvo_plugin = NfvoPlugin()
nfvo_plugin.delete_vim(context, vim_info.id)
for k8s_vim in vnf_instance.vim_connection_info:
if k8s_vim.vim_id == vim_info.id:
vnf_instance.vim_connection_info.remove(k8s_vim)
# delete cluster info on ansible server
_, _, ansible, _ = \
self._get_initial_parameters(
context, vnf_instance, terminate_vnf_request)
commander = self._init_commander_and_set_script(
ansible.get('username'), ansible.get('password'),
ansible.get('ip_address'), K8S_CMD_TIMEOUT)
ssh_command = 'rm -rf {}'.format(
k8s_params.get('ansible').get('transferring_inventory_path'))
self._execute_command(
commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0)
ssh_command = 'rm -rf ~/.ssh/known_hosts'
self._execute_command(
commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0)
commander.close_session()
def _update_external_lb(self, external_lb_param, lb_ssh_ip, hostname):
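        # Remove the line for the deleted worker's hostname from
        # /etc/haproxy/haproxy.cfg on the external LB and restart haproxy.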
external_lb_commander = self._init_commander_and_set_script(
external_lb_param.get('ssh_username'),
external_lb_param.get('ssh_password'),
lb_ssh_ip,
K8S_CMD_TIMEOUT
)
ssh_command = 'grep -n "{}" /etc/haproxy/haproxy.cfg | ' \
'cut -d : -f 1'.format(hostname)
result = self._execute_command(
external_lb_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
if result:
worker_host_num = result[0].replace('\n', '')
if worker_host_num.isdigit():
lb_server_num = int(worker_host_num, base=0)
ssh_command = "sudo sed -i '{}d' " \
"/etc/haproxy/haproxy.cfg" \
.format(lb_server_num)
self._execute_command(
external_lb_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
self._restart_haproxy(external_lb_commander)
external_lb_commander.close_session()
def _delete_worker_node_and_update_inventory_file(
self, ansible, worker_node, worker_hostname, operation_type):
update_hosts_yaml_path = ansible.get(
'transferring_inventory_path') + '/hosts.yaml'
self._modify_ansible_user_or_password(update_hosts_yaml_path,
worker_node, ansible)
# remove worker node
ssh_command = "ansible-playbook -i" \
" {}/hosts.yaml" \
" --become --become-user=root " \
"{}/remove-node.yml -e" \
" node={}".format(ansible.get(
'transferring_inventory_path'),
ansible.get('kubespray_root_path'),
worker_hostname)
try:
with eventlet.Timeout(K8S_INSTALL_TIMEOUT, True):
result, code = self._uninstall_worker_node(
ssh_command, ansible)
if code != 0:
                    msg = 'Failed to remove the worker node {}'.\
                        format(worker_hostname)
LOG.error(result)
raise exceptions.MgmtDriverOtherError(
error_message=msg)
LOG.debug(result)
except eventlet.timeout.Timeout:
            msg = 'Timed out while deleting' \
                  ' the worker node {}'.format(worker_hostname)
LOG.error(msg)
raise exceptions.MgmtDriverOtherError(
error_message=msg)
        # Get the line number of the row where worker_hostname resides
if operation_type == 'SCALE':
while True:
commander_k8s = self._init_commander_and_set_script(
ansible.get('username'), ansible.get('password'),
ansible.get('ip_address'), K8S_CMD_TIMEOUT)
ssh_command = 'grep -n "{}" {} | head -1 ' \
'| cut -d : -f 1'\
.format(worker_hostname, update_hosts_yaml_path)
host_name = self._execute_command(
commander_k8s, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
if host_name:
host_name_line = host_name[0].replace('\n', '')
if host_name_line.isdigit():
host_name_line = int(host_name_line, base=0)
ssh_command = 'sed -n {}P {}' \
.format(host_name_line + 1,
update_hosts_yaml_path)
is_hosts_or_children = self._execute_command(
commander_k8s, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)[0]
if "ansible_host" in is_hosts_or_children:
ssh_command = "sed -i '{}, {}d' {}" \
.format(host_name_line,
host_name_line + 4,
update_hosts_yaml_path)
else:
ssh_command = "sed -i " \
"'{}d' {}"\
.format(host_name_line,
update_hosts_yaml_path)
self._execute_command(
commander_k8s, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
else:
break
commander_k8s.close_session()
if os.path.exists(update_hosts_yaml_path):
os.remove(update_hosts_yaml_path)
def _uninstall_worker_node(self, ssh_command, ansible):
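        # Run the remove-node.yml playbook in an interactive shell so the
        # "Type 'yes' to delete nodes" prompt can be answered, then scan the
        # PLAY RECAP output for failures and return (output, return_code).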
end_str = ('# ', '$ ', '? ', '% ')
end_flag = False
result_end_flag = False
command_return_code = 0
try:
trans = paramiko.Transport((ansible.get('ip_address'), 22))
trans.start_client()
trans.auth_password(username=ansible.get('username'),
                                password=ansible.get('password'))
channel = trans.open_session()
channel.settimeout(UNINSTALL_NODE_TIMEOUT)
channel.get_pty()
buff = ''
channel.invoke_shell()
channel.send(ssh_command + '\n')
while True:
time.sleep(COMMAND_WAIT_COMPLETE_TIME)
resp = channel.recv(1024)
resp = resp.decode('utf-8')
buff += resp
if "Type 'yes' to delete nodes" in resp:
channel.send('yes\n')
time.sleep(COMMAND_WAIT_COMPLETE_TIME)
resp = channel.recv(1024)
resp = resp.decode('utf-8')
buff += resp
for end_s in end_str:
if resp.endswith(end_s):
end_flag = True
break
if end_flag:
break
if 'PLAY RECAP' in resp:
result_end_flag = True
if result_end_flag and 'failed=0' not in resp:
command_return_code = 2
channel.close()
trans.close()
return buff, command_return_code
except (exceptions.NotAuthorized, paramiko.SSHException,
paramiko.ssh_exception.NoValidConnectionsError) as e:
LOG.debug(e)
raise exceptions.MgmtDriverOtherError(error_message=e)
def _get_initial_parameters(self, context, vnf_instance, action_request):
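        # Collect worker_node / external_lb_param / ansible settings from the
        # instantiation additionalParams, allowing the incoming request's
        # additionalParams (if any) to override them.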
vim_connection_info = \
self._get_vim_connection_info(context, vnf_instance)
k8s_cluster_installation_param = \
vnf_instance.instantiated_vnf_info.additional_params.get(
'k8s_cluster_installation_param')
worker_node_default = \
k8s_cluster_installation_param.get('worker_node')
external_lb_param_default = \
k8s_cluster_installation_param.get('external_lb_param')
ansible_default = \
k8s_cluster_installation_param.get('ansible')
# If additional_params exist in action_request
if hasattr(action_request, 'additional_params') and \
action_request.additional_params:
# Get the VM's information from action_request
add_param = action_request. \
additional_params.get('k8s_cluster_installation_param')
if add_param:
worker_node = add_param.get('worker_node', worker_node_default)
external_lb_param = add_param.get('external_lb_param',
external_lb_param_default)
ansible = add_param.get('ansible', ansible_default)
else:
worker_node = worker_node_default
external_lb_param = external_lb_param_default
ansible = ansible_default
else:
worker_node = worker_node_default
external_lb_param = external_lb_param_default
ansible = ansible_default
return worker_node, external_lb_param, ansible, vim_connection_info
def _remove_node_and_update_config_file(
self, worker_hostnames, external_lb_param,
lb_ssh_ip, ansible, worker_node, operation_type):
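        # For each worker being removed: drain it via kubectl on the external
        # LB (which holds the kubeconfig), wait for its pods to be
        # rescheduled, run remove-node.yml and prune hosts.yaml, then drop
        # its entry from the LB's haproxy.cfg.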
# Migrate the pod of the worker node
for worker_hostname in worker_hostnames:
# init lb RemoteCommandExecutor
external_lb_commander = self._init_commander_and_set_script(
external_lb_param.get('ssh_username'),
external_lb_param.get('ssh_password'),
lb_ssh_ip,
K8S_CMD_TIMEOUT
)
# check worker_node exist in k8s-cluster
ssh_command = "kubectl get node --no-headers {}" \
" 2> /dev/null".format(worker_hostname)
result = self._execute_command(external_lb_commander,
ssh_command,
K8S_CMD_TIMEOUT,
'common',
0)
if result:
ssh_command = \
"kubectl get pods --field-selector=spec." \
"nodeName={} -o json".format(worker_hostname)
result = self._execute_command(external_lb_commander,
ssh_command,
K8S_CMD_TIMEOUT,
'common',
0)
# Get the names of all pods on the worker node
daemonset_content_str = ''.join(result)
daemonset_content = json.loads(
daemonset_content_str)
ssh_command = "kubectl drain {}" \
" --ignore-daemonsets" \
" --delete-emptydir-data" \
" --timeout={}s".format(worker_hostname,
DRAIN_TIMEOUT)
self._execute_command(external_lb_commander,
ssh_command,
K8S_DEPLOY_TIMEOUT,
'common', 0)
self.evacuate_wait(external_lb_commander,
daemonset_content)
external_lb_commander.close_session()
# Uninstall worker node and update inventory file
self._delete_worker_node_and_update_inventory_file(
ansible, worker_node, worker_hostname, operation_type)
# Update ExternalLB's haproxy
self._update_external_lb(external_lb_param, lb_ssh_ip,
worker_hostname)
ansible_commander = self._init_commander_and_set_script(
ansible.get('username'), ansible.get('password'),
ansible.get('ip_address'), K8S_CMD_TIMEOUT)
ssh_command = 'rm -rf ~/.ssh/known_hosts'
self._execute_command(
ansible_commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0)
ansible_commander.close_session()
@log.log
def scale_start(self, context, vnf_instance,
scale_vnf_request, grant,
grant_request, **kwargs):
# If the type of scale is SCALE_IN
if scale_vnf_request.type == 'SCALE_IN':
scale_name_list = kwargs.get('scale_name_list')
nest_stack_id = vnf_instance.instantiated_vnf_info.instance_id
resource_name = scale_vnf_request.aspect_id
worker_node, external_lb_param, ansible, vim_connection_info = \
self._get_initial_parameters(
context, vnf_instance, scale_vnf_request)
# Get the ssh ip of LB
heatclient = hc.HeatClient(vim_connection_info.access_info)
resource_info = heatclient.resources. \
get(stack_id=nest_stack_id,
resource_name=external_lb_param.get('ssh_cp_name'))
# If the VM's floating ip is not None
# Get floating ip from resource_info and assign it to ssh ip
lb_ssh_ip = self._get_lb_or_worker_ssh_ip(resource_info, True)
# Get the ip of scale in worker nodes
worker_group_resource = heatclient.resources. \
get(stack_id=nest_stack_id,
resource_name=resource_name)
# if worker_group_resource is None
if not worker_group_resource:
LOG.error("The specified resource was not found.")
raise exceptions.MgmtDriverOtherError(
error_message='The specified resource was not found.')
worker_resource_list = \
heatclient.resources.list(
stack_id=worker_group_resource.physical_resource_id)
worker_ip_dict_list = []
for worker_resource in worker_resource_list:
# If worker_resource.resource_name exists in scale_name_list
if worker_resource.resource_name in scale_name_list:
stack_id = worker_resource.physical_resource_id
# Get the ssh_ip, nic ip of worker node
worker_ssh_ip, worker_nic_ip = self._get_ssh_ip_and_nic_ip(
heatclient, stack_id, worker_node)
# Create worker_ip_dict_list data
ip_dict = {"ssh_ip": worker_ssh_ip,
"nic_ip": worker_nic_ip}
worker_ip_dict_list.append(ip_dict)
# Get the hostname of the scale in worker node.
worker_hostnames = []
for worker_ip_dict in worker_ip_dict_list:
# get worker host names
worker_hostname = \
'worker' + worker_ip_dict.get("nic_ip").split('.')[-1]
worker_hostnames.append(worker_hostname)
self._remove_node_and_update_config_file(
worker_hostnames, external_lb_param,
lb_ssh_ip, ansible, worker_node, 'SCALE')
else:
pass
def evacuate_wait(self, commander, daemonset_content):
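        # Poll the cluster pod list until the pods that were running on the
        # drained node are gone; daemonset pods such as calico-node and
        # kube-proxy are ignored. Gives up after CHECK_POD_STATUS_RETRY_COUNT
        # checks, sleeping NEXT_CHECK_INTERVAL_TIME seconds between checks.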
wait_flag = True
retry_count = CHECK_POD_STATUS_RETRY_COUNT
while wait_flag and retry_count > 0:
if daemonset_content.get('items'):
ssh_command = "kubectl get pods --all-namespaces -o json"
result = self._execute_command(
commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 3)
pods_list = json.loads(''.join(result)).get('items')
pods_names = [pod.get('metadata', {}).get('name')
for pod in pods_list]
for daemonset in daemonset_content.get('items'):
daemonset_name = daemonset.get('metadata', {}).get('name')
if daemonset_name in pods_names and \
'calico-node' not in daemonset_name and \
'kube-proxy' not in daemonset_name:
break
else:
wait_flag = False
else:
break
if not wait_flag:
break
time.sleep(NEXT_CHECK_INTERVAL_TIME)
retry_count -= 1
def _get_lb_or_worker_ssh_ip(self, resource_info, is_lb):
if resource_info.attributes.get('floating_ip_address'):
ssh_ip = resource_info.attributes.get('floating_ip_address')
else:
ssh_ip = resource_info.attributes. \
get('fixed_ips')[0].get('ip_address')
if ssh_ip is None:
if is_lb:
LOG.error("Failed to get the LB's ssh ip.")
raise exceptions.MgmtDriverOtherError(
error_message="Failed to get the LB's ssh ip.")
LOG.error("Failed to get the Worker's ssh ip.")
raise exceptions.MgmtDriverOtherError(
error_message="Failed to get the Worker's ssh ip.")
return ssh_ip
def _restart_haproxy(self, external_lb_commander):
# restart haproxy
ssh_command = 'sudo systemctl restart haproxy'
self._execute_command(
external_lb_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
ssh_command = 'sudo systemctl status haproxy | ' \
'grep Active'
self._execute_command(
external_lb_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
def _update_lb_config_file(self, external_lb_param, lb_ssh_ip,
worker_ip_dict_list):
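        # Append a "server <hostname> <nic_ip> check" line for each new
        # worker under the "backend kubernetes-nodeport" section of
        # haproxy.cfg on the external LB, then restart haproxy.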
external_lb_commander = self._init_commander_and_set_script(
external_lb_param.get('ssh_username'),
external_lb_param.get('ssh_password'),
lb_ssh_ip,
K8S_CMD_TIMEOUT
)
add_row_data = ''
for worker_ip_dict in worker_ip_dict_list:
worker_host_name = 'worker' + \
worker_ip_dict.get('nic_ip').split('.')[-1]
nic_ip = worker_ip_dict.get('nic_ip')
row_data = ' server {} {} check'.format(
worker_host_name, nic_ip)
add_row_data += row_data + '\\n'
ssh_command = 'grep -n "backend kubernetes-nodeport" ' \
'/etc/haproxy/haproxy.cfg | head -1 | cut -d : -f 1'
result = self._execute_command(external_lb_commander,
ssh_command,
K8S_INSTALL_TIMEOUT,
'common', 0)[0].replace('\n', '')
write_start_row = int(result) + 2
ssh_command = 'sudo sed -i "{}a\\{}" ' \
'/etc/haproxy/haproxy.cfg'.format(
write_start_row, add_row_data)
LOG.debug("ssh_command: {}".format(ssh_command))
self._execute_command(
external_lb_commander, ssh_command,
K8S_INSTALL_TIMEOUT, 'common', 0)
self._restart_haproxy(external_lb_commander)
external_lb_commander.close_session()
def _install_node_and_update_config_file(
self, worker_node, worker_ip_dict_list,
ansible, external_lb_param, lb_ssh_ip):
# check worker_VM can be accessed via ssh
self._init_commander_and_set_script(
worker_node.get('username'), worker_node.get('password'),
worker_ip_dict_list[0].get('ssh_ip'), K8S_CMD_TIMEOUT)
# Install worker node
commander_k8s = self._init_commander_and_set_script(
ansible.get('username'), ansible.get('password'),
ansible.get('ip_address'),
K8S_INSTALL_TIMEOUT * len(worker_ip_dict_list))
facts_yaml_path = ansible.get(
'kubespray_root_path') + '/facts.yml'
ssh_command = "ansible-playbook -i" \
" {}/hosts.yaml" \
" --become --become-user=root {}" \
.format(ansible.get('transferring_inventory_path'),
facts_yaml_path)
self._execute_command(
commander_k8s, ssh_command,
K8S_DEPLOY_TIMEOUT, 'common', 0)
scale_yaml_path = ansible.get(
'kubespray_root_path') + '/scale.yml'
ssh_command = "ansible-playbook -i" \
" {}/hosts.yaml" \
" --become --become-user=root {}".format(
ansible.get('transferring_inventory_path'),
scale_yaml_path)
self._execute_command(
commander_k8s, ssh_command,
K8S_INSTALL_TIMEOUT * len(worker_ip_dict_list),
'install', 0)
commander_k8s.close_session()
# Update ExternalLB's haproxy.cfg
self._update_lb_config_file(
external_lb_param, lb_ssh_ip, worker_ip_dict_list)
@log.log
def scale_end(self, context, vnf_instance,
scale_vnf_request, grant,
grant_request, **kwargs):
if scale_vnf_request.type == 'SCALE_OUT':
scale_out_id_list = kwargs.get('scale_out_id_list')
nest_stack_id = vnf_instance.instantiated_vnf_info.instance_id
worker_node, external_lb_param, ansible, vim_connection_info =\
self._get_initial_parameters(
context, vnf_instance, scale_vnf_request)
heatclient = hc.HeatClient(vim_connection_info.access_info)
# Get the ssh ip of LB
resource_info = heatclient.resources. \
get(stack_id=nest_stack_id,
resource_name=external_lb_param.get('ssh_cp_name'))
lb_ssh_ip = self._get_lb_or_worker_ssh_ip(resource_info, True)
# get scale-out worker's info
worker_ip_dict_list = []
for scale_out_id in scale_out_id_list:
stack_id = scale_out_id
# Get the ssh_ip, nic ip of worker node
worker_ssh_ip, worker_nic_ip = self._get_ssh_ip_and_nic_ip(
heatclient, stack_id, worker_node)
# Create worker_ip_dict_list data
ip_dict = {"ssh_ip": worker_ssh_ip, "nic_ip": worker_nic_ip}
worker_ip_dict_list.append(ip_dict)
# read hosts.yaml file contents
update_hosts_yaml_path = ansible.get(
'transferring_inventory_path') + '/hosts.yaml'
local_hosts_yaml_path = '/tmp/hosts.yaml'
# update hosts.yaml
hosts_content = self._modify_ansible_user_or_password(
update_hosts_yaml_path, worker_node, ansible)
for worker_ip_dict in worker_ip_dict_list:
# Update inventory file
# update hosts.yaml file contents
worker_host_name = 'worker' + \
worker_ip_dict.get('nic_ip').split('.')[-1]
hosts_content['all']['hosts'][worker_host_name] = {
'ansible_host': worker_ip_dict.get('ssh_ip'),
'ip': worker_ip_dict.get('nic_ip'),
'ansible_user': worker_node.get('username'),
'ansible_password': worker_node.get('password')
}
hosts_content['all']['children']['kube_node'][
'hosts'][worker_host_name] = None
LOG.debug("get hosts_content: {}".format(hosts_content))
with open(local_hosts_yaml_path, 'w', encoding='utf-8') as nf:
yaml.safe_dump(hosts_content, nf,
default_flow_style=False)
self._send_or_receive_file(
ansible.get('ip_address'), ansible.get('username'),
ansible.get('password'), update_hosts_yaml_path,
local_hosts_yaml_path, 'send')
            # Install worker node and update configuration file
self._install_node_and_update_config_file(
worker_node, worker_ip_dict_list, ansible,
external_lb_param, lb_ssh_ip)
else:
pass
def _modify_ansible_user_or_password(self, host_path,
worker_node, ansible):
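        # Fetch hosts.yaml from the ansible server and, if any kube_node host
        # entry carries a different ansible_user/ansible_password than the
        # current worker_node credentials, rewrite them and push the file
        # back. Returns the parsed hosts.yaml content.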
try:
# read hosts.yml
local_hosts_yaml_path = '/tmp/hosts.yaml'
self._send_or_receive_file(
ansible.get('ip_address'), ansible.get('username'),
ansible.get('password'), host_path,
local_hosts_yaml_path, 'receive')
with open(local_hosts_yaml_path, 'r', encoding='utf-8') as f:
file_content = f.read()
hosts_content = yaml.safe_load(file_content)
worker_nodes = hosts_content['all']['children']['kube_node'][
'hosts']
LOG.debug("worker_nodes: {}".format(worker_nodes))
if worker_nodes:
kube_node_hosts_keys = list(worker_nodes.keys())
LOG.debug("kube_node_keys: {}".format(kube_node_hosts_keys))
hosts_key = list(hosts_content['all']['hosts'].keys())
LOG.debug("hosts_key: {}".format(hosts_key))
need_modify = False
for kube_node_hosts in kube_node_hosts_keys:
if kube_node_hosts in hosts_key:
content = \
hosts_content['all']['hosts'][kube_node_hosts]
LOG.debug("get node content: {}".format(content))
ansible_user = content.get("ansible_user")
ansible_password = content.get("ansible_password")
if ansible_user != worker_node.get('username'):
hosts_content['all']['hosts'][kube_node_hosts][
'ansible_user'] = worker_node.get('username')
need_modify = True
if ansible_password != worker_node.get('password'):
hosts_content['all']['hosts'][kube_node_hosts][
'ansible_password'] = \
worker_node.get('password')
need_modify = True
if need_modify:
with open(local_hosts_yaml_path, 'w', encoding='utf-8') \
as nf:
yaml.safe_dump(hosts_content, nf,
default_flow_style=False)
self._send_or_receive_file(
ansible.get('ip_address'), ansible.get('username'),
ansible.get('password'), host_path,
local_hosts_yaml_path, 'send')
return hosts_content
os.remove(local_hosts_yaml_path)
        except Exception as e:
            LOG.error('Failed to modify ansible_user or ansible_password:'
                      ' {}.'.format(e))
            raise exceptions.MgmtDriverOtherError(
                error_message='Failed to modify user or password:'
                              ' {}.'.format(e))
def _get_vnfc_resource_id(self, vnfc_resource_list, vnfc_instance_id):
for vnfc_resource in vnfc_resource_list:
if vnfc_resource.id == vnfc_instance_id:
return vnfc_resource
return None
def _get_heal_physical_resource_ids(self, vnf_instance,
heal_vnf_request):
heal_physical_resource_ids = []
for vnfc_instance_id in heal_vnf_request.vnfc_instance_id:
instantiated_vnf_info = vnf_instance.instantiated_vnf_info
vnfc_resource_info = instantiated_vnf_info.vnfc_resource_info
vnfc_resource = self._get_vnfc_resource_id(
vnfc_resource_info, vnfc_instance_id)
if vnfc_resource:
heal_physical_resource_ids.append(
vnfc_resource.compute_resource.resource_id)
return heal_physical_resource_ids
def _get_heal_worker_node_info(
self, vnf_additional_params, worker_node, heatclient,
nest_stack_id, heal_physical_resource_ids):
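        # Map the physical resource ids being healed back to worker hostnames
        # and ssh/nic IPs by walking the Heat resource tree (either the
        # aspect_id scaling group or, for a single-node VNFD, the top stack).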
worker_ip_dict_list = []
if worker_node.get('aspect_id'):
worker_group_resource_name = worker_node.get('aspect_id')
worker_group_resource = heatclient.resources.get(
stack_id=nest_stack_id,
resource_name=worker_group_resource_name)
if not worker_group_resource:
raise exceptions.MgmtDriverOtherError(
error_message='The specified resource'
' {} was not found.'.format(
worker_group_resource_name))
worker_group_resource_list = heatclient.resources.list(
stack_id=worker_group_resource.physical_resource_id)
for worker_resource in worker_group_resource_list:
lowest_resource_list = heatclient.resources.list(
stack_id=worker_resource.physical_resource_id)
for lowest_resource in lowest_resource_list:
if lowest_resource.resource_type == 'OS::Nova::Server' \
and lowest_resource.physical_resource_id in \
heal_physical_resource_ids:
worker_ssh_ip, worker_nic_ip = \
self._get_ssh_ip_and_nic_ip(
heatclient,
worker_resource.physical_resource_id,
worker_node)
ip_dict = {"nic_ip": worker_nic_ip,
"ssh_ip": worker_ssh_ip}
worker_ip_dict_list.append(ip_dict)
else:
# in case of SOL001 TOSCA-based VNFD with single worker node
resource_list = heatclient.resources.list(
stack_id=nest_stack_id)
for resource in resource_list:
if resource.resource_type == 'OS::Nova::Server' \
and resource.physical_resource_id in \
heal_physical_resource_ids:
worker_ssh_ip, worker_nic_ip = \
self._get_ssh_ip_and_nic_ip(
heatclient, nest_stack_id, worker_node)
ip_dict = {"nic_ip": worker_nic_ip,
"ssh_ip": worker_ssh_ip}
worker_ip_dict_list.append(ip_dict)
# Get the hostname of the deleting worker nodes
worker_hostnames = []
for worker_ip_dict in worker_ip_dict_list:
# get worker host names
worker_hostname = \
'worker' + worker_ip_dict.get("nic_ip").split('.')[-1]
worker_hostnames.append(worker_hostname)
return worker_hostnames, worker_ip_dict_list
@log.log
def heal_start(self, context, vnf_instance,
heal_vnf_request, grant,
grant_request, **kwargs):
vnf_additional_params = \
vnf_instance.instantiated_vnf_info.additional_params
# heal of the entire VNF
if not heal_vnf_request.vnfc_instance_id:
self.terminate_end(context, vnf_instance, heal_vnf_request,
grant, grant_request)
else:
# heal specified with VNFC instances
heal_physical_resource_ids = \
self._get_heal_physical_resource_ids(
vnf_instance, heal_vnf_request)
worker_node, external_lb_param, ansible, vim_connection_info = \
self._get_initial_parameters(
context, vnf_instance, heal_vnf_request)
nest_stack_id = vnf_instance.instantiated_vnf_info.instance_id
# Get the ssh ip of LB
heatclient = hc.HeatClient(vim_connection_info.access_info)
ssh_ip, _ = self._get_ssh_ip_and_nic_ip(
heatclient, nest_stack_id, external_lb_param)
# Get the worker_hostnames to be healed
worker_hostnames, _ = self._get_heal_worker_node_info(
vnf_additional_params, worker_node, heatclient,
nest_stack_id, heal_physical_resource_ids)
# remove_worker_node_from_k8s_cluster and update configuration file
self._remove_node_and_update_config_file(
worker_hostnames, external_lb_param,
ssh_ip, ansible, worker_node, 'HEAL')
@log.log
def heal_end(self, context, vnf_instance,
heal_vnf_request, grant,
grant_request, **kwargs):
vnf_additional_params = \
vnf_instance.instantiated_vnf_info.additional_params
# heal of the entire VNF
if not heal_vnf_request.vnfc_instance_id:
add_param_list = ['master_node', 'worker_node', 'proxy',
'ansible', 'external_lb_param']
for add_param in add_param_list:
if heal_vnf_request.additional_params.get(
'k8s_cluster_installation_param'):
if add_param in heal_vnf_request.additional_params.get(
'k8s_cluster_installation_param'):
vnf_additional_params.get(
'k8s_cluster_installation_param')[add_param] = \
heal_vnf_request.additional_params[
'k8s_cluster_installation_param'].get(
add_param)
heal_vnf_request.additional_params = vnf_additional_params
self.instantiate_end(context, vnf_instance, heal_vnf_request,
grant, grant_request)
else:
# heal specified with VNFC instances
heal_physical_resource_ids = \
self._get_heal_physical_resource_ids(
vnf_instance, heal_vnf_request)
worker_node, external_lb_param, ansible, vim_connection_info = \
self._get_initial_parameters(
context, vnf_instance, heal_vnf_request)
nest_stack_id = vnf_instance.instantiated_vnf_info.instance_id
# Get the ssh ip of LB
heatclient = hc.HeatClient(vim_connection_info.access_info)
ssh_ip, _ = self._get_ssh_ip_and_nic_ip(
heatclient, nest_stack_id, external_lb_param)
# Get the worker_ip_dict_list to be healed
_, worker_ip_dict_list = self._get_heal_worker_node_info(
vnf_additional_params, worker_node, heatclient,
nest_stack_id, heal_physical_resource_ids)
# Install worker node and update configuration file
self._install_node_and_update_config_file(
worker_node, worker_ip_dict_list, ansible,
external_lb_param, ssh_ip)
@log.log
def change_external_connectivity_start(
self, context, vnf_instance,
change_ext_conn_request, grant,
grant_request, **kwargs):
pass
@log.log
def change_external_connectivity_end(
self, context, vnf_instance,
change_ext_conn_request, grant,
grant_request, **kwargs):
pass
|
statsmodels/examples/tut_ols_ancova.py | madhushree14/statsmodels | 6,931 | 11074418 | <gh_stars>1000+
'''Examples OLS
Note: plt.show() is called at the end to display the graphs
Summary:
========
Relevant part of construction of design matrix
xg includes group numbers/labels,
x1 is continuous explanatory variable
>>> dummy = (xg[:,None] == np.unique(xg)).astype(float)
>>> X = np.c_[x1, dummy[:,1:], np.ones(nsample)]
Estimate the model
>>> res2 = sm.OLS(y, X).fit()
>>> print(res2.params)
[ 1.00901524 3.08466166 -2.84716135 9.94655423]
>>> print(res2.bse)
[ 0.07499873 0.71217506 1.16037215 0.38826843]
>>> prstd, iv_l, iv_u = wls_prediction_std(res2)
"Test hypothesis that all groups have same intercept"
>>> R = [[0, 1, 0, 0],
... [0, 0, 1, 0]]
>>> print(res2.f_test(R))
<F test: F=array([[ 91.69986847]]), p=[[ 8.90826383e-17]], df_denom=46, df_num=2>
strongly rejected because differences in intercept are very large
'''
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.sandbox.regression.predstd import wls_prediction_std
#fix a seed for these examples
np.random.seed(98765789)
#OLS with dummy variables, similar to ANCOVA
#-------------------------------------------
#construct simulated example:
#3 groups common slope but different intercepts
nsample = 50
x1 = np.linspace(0, 20, nsample)
sig = 1.
#suppose observations from 3 groups
xg = np.zeros(nsample, int)
xg[20:40] = 1
xg[40:] = 2
#print xg
dummy = (xg[:,None] == np.unique(xg)).astype(float)
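#broadcasting xg against its unique labels yields one one-hot (dummy) column per group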
#use group 0 as benchmark
X = np.c_[x1, dummy[:,1:], np.ones(nsample)]
beta = [1., 3, -3, 10]
y_true = np.dot(X, beta)
y = y_true + sig * np.random.normal(size=nsample)
#estimate
#~~~~~~~~
res2 = sm.OLS(y, X).fit()
#print "estimated parameters: x d1-d0 d2-d0 constant"
print(res2.params)
#print "standard deviation of parameter estimates"
print(res2.bse)
prstd, iv_l, iv_u = wls_prediction_std(res2)
#print res.summary()
#plot
#~~~~
plt.figure()
plt.plot(x1, y, 'o', x1, y_true, 'b-')
plt.plot(x1, res2.fittedvalues, 'r--.')
plt.plot(x1, iv_u, 'r--')
plt.plot(x1, iv_l, 'r--')
plt.title('3 groups: different intercepts, common slope; blue: true, red: OLS')
plt.show()
#Test hypothesis that all groups have same intercept
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
R = [[0, 1, 0, 0],
[0, 0, 1, 0]]
# F test joint hypothesis R * beta = 0
# i.e. coefficient on both dummy variables equal zero
print("Test hypothesis that all groups have same intercept")
print(res2.f_test(R))
|
database/provisioning/create_teams.py | aprilsanchez/ictf-framework | 110 | 11074429 |
import os
import random
import requests
import sys
import string
import yaml
import json
rnd = random.SystemRandom()
# THESE PATHS ARE RELATIVE TO WHERE TERRAFORM WILL BE LAUNCHED BECAUSE
# THEY ARE USED IN THE PROVISIONING SECTION.
PROV_FOLDER = os.path.dirname(os.path.abspath(__file__))
SECRETS_FOLDER = os.path.join(PROV_FOLDER, '../../secrets/')
VM_PROVISIONED_NAME = "database"
def get_all_teams(db_api_base_url, db_secret):
teams_info_url = db_api_base_url + "/teams/info"
print(teams_info_url)
result = requests.get(teams_info_url, params={'secret': db_secret})
try:
response = result.json()
except Exception as ex:
raise Exception("Invalid JSON returned from /teams/info endpoint: {}".format(result.content)) from ex
print(response)
return response['teams']
def create_team(db_api_base_url, db_secret, team_id, name, country, logo, team_email, team_password, url,
academic_team, login_token, flag_token):
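    # Register the team via the database API's /team/add_direct endpoint;
    # returns the new team id on success or raises if the API reports failure.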
params = {'secret': db_secret}
    data = {"name": name, "country": country, "logo": logo, "team_email": team_email, "team_password": team_password,
"url": url, "academic_team": academic_team, "id": team_id,
"login_token": login_token, "flag_token": flag_token}
result = requests.post(db_api_base_url + "/team/add_direct", data=data, params=params)
print(result.content)
response = result.json()
if response['result'] == "success":
return response['team_id']
else:
print("ERROR %s" % response['fail_reason'])
raise Exception("Couldn't create team id %s name %s." % (team_id, name))
def validate_team(db_api_base_url, db_secret, new_team_id):
update_team_url = db_api_base_url + "/team/update/{}".format(new_team_id)
data = {"validated": '1'}
result = requests.post(update_team_url, data=data, params={'secret': db_secret})
response = result.json()
if response['result'] == "success":
print("Team %s successfully created and validated" % new_team_id)
else:
print("Team %s could not be validated!!!!!" % new_team_id)
def add_teams_info(db_api_base_url, db_secret, game_config):
# get all the teams in the DB that are currently created
current_teams = get_all_teams(db_api_base_url, db_secret)
for team in game_config['teams']:
# check to see if this team is already in the DB, if so let's skip
if str(team['id']) in current_teams:
cur_team = current_teams[str(team['id'])]
if cur_team['name'] != team['name']:
print("Already a team in the DB with the requested id %s but with a different name %s != %s" % (team['id'], team['name'], cur_team['name']))
continue
if cur_team['email'] != team['email']:
print("Already a team in the DB with the requested id %s but with different email %s != %s" % (team['id'], team['email'], cur_team['email']))
continue
print("Team %s already exists in the DB, but it looks like all the info is OK" % (team['id'],))
continue
gm_token = ''.join(rnd.choice(string.ascii_letters + string.digits) for _ in range(16))
login_token = ''.join(rnd.choice(string.ascii_letters + string.digits) for _ in range(32)) # must get rid of this shit
flag_token = team['flag_token']
team_id = create_team(db_api_base_url, db_secret, team['id'], team['name'], team['country'], team['logo'], team['email'],
gm_token, team['url'], team['academic_team'], login_token, flag_token)
if team_id:
validate_team(db_api_base_url, db_secret, team_id)
else:
# The previous code was just assuming that teams would get added with increasing IDs, and had no error checking if the team couldn't be created
pass
# add all services
if __name__== "__main__":
game_config = json.load(open(sys.argv[2], 'r'))
db_api = sys.argv[1] # passed from terraform script
database_api_secret_path = SECRETS_FOLDER+"database-api/secret"
if os.path.isfile(database_api_secret_path):
f = open(database_api_secret_path, "r")
database_api_secret = f.read().rstrip()
db_secret = database_api_secret # to read from the folder "secrets" generated by the make_secret.sh script
add_teams_info('http://' + db_api, db_secret, game_config)
else:
raise Exception("Missing database secrets!")
|
src/greynoise/cli/decorator.py | aaronsdevera/pygreynoise | 127 | 11074433 | """CLI subcommand decorators.
Decorators used to add common functionality to subcommands.
"""
import functools
import click
import structlog
from requests.exceptions import RequestException
from greynoise.api import GreyNoise
from greynoise.cli.formatter import FORMATTERS
from greynoise.cli.parameter import ip_addresses_parameter
from greynoise.exceptions import RequestFailure
from greynoise.util import load_config
LOGGER = structlog.get_logger()
def echo_result(function):
"""Decorator that prints subcommand results correctly formatted.
:param function: Subcommand that returns a result from the API.
:type function: callable
:returns: Wrapped function that prints subcommand results
:rtype: callable
"""
@functools.wraps(function)
def wrapper(*args, **kwargs):
result = function(*args, **kwargs)
context = click.get_current_context()
params = context.params
output_format = params["output_format"]
formatter = FORMATTERS[output_format]
if isinstance(formatter, dict):
# For the text formatter, there's a separate formatter for each subcommand
formatter = formatter[context.command.name]
output = formatter(result, params.get("verbose", False)).strip("\n")
click.echo(
output, file=params.get("output_file", click.open_file("-", mode="w"))
)
return wrapper
def handle_exceptions(function):
"""Print error and exit on API client exception.
:param function: Subcommand that returns a result from the API.
:type function: callable
    :returns: Wrapped function that prints an error and exits on API failures
:rtype: callable
"""
@functools.wraps(function)
def wrapper(*args, **kwargs):
try:
return function(*args, **kwargs)
except RequestFailure as exception:
body = exception.args[1]
if "message" in body:
error_message = "API error: {}".format(body["message"])
elif "error" in body:
error_message = "API error: {}".format(body["error"])
else:
error_message = "API error: {}".format(body)
LOGGER.error(error_message)
click.echo(error_message)
click.get_current_context().exit(-1)
except RequestException as exception:
error_message = "API error: {}".format(exception)
LOGGER.error(error_message)
click.echo(error_message)
click.get_current_context().exit(-1)
return wrapper
def pass_api_client(function):
"""Create API client form API key and pass it to subcommand.
:param function: Subcommand that returns a result from the API.
:type function: callable
:returns: Wrapped function that prints subcommand results
:rtype: callable
"""
@functools.wraps(function)
def wrapper(*args, **kwargs):
context = click.get_current_context()
api_key = context.params.get("api_key")
offering = context.params.get("offering")
config = load_config()
if api_key is None:
if not config["api_key"]:
prog_name = context.parent.info_name
click.echo(
"\nError: API key not found.\n\n"
"To fix this problem, please use any of the following methods "
"(in order of precedence):\n"
"- Pass it using the -k/--api-key option.\n"
"- Set it in the GREYNOISE_API_KEY environment variable.\n"
"- Run {!r} to save it to the configuration file.\n".format(
"{} setup".format(prog_name)
)
)
context.exit(-1)
api_key = config["api_key"]
if offering is None:
if not config["offering"]:
offering = "enterprise"
else:
offering = config["offering"]
api_client = GreyNoise(
api_key=api_key,
offering=offering,
timeout=config["timeout"],
integration_name="cli",
)
return function(api_client, *args, **kwargs)
return wrapper
def gnql_command(function):
"""Decorator that groups decorators common to gnql query and stats subcommands."""
@click.command()
@click.argument("query", required=False)
@click.option("-k", "--api-key", help="Key to include in API requests")
@click.option(
"-O",
"--offering",
help="Which API offering to use, enterprise or community, "
"defaults to enterprise",
)
@click.option("-i", "--input", "input_file", type=click.File(), help="Input file")
@click.option(
"-o", "--output", "output_file", type=click.File(mode="w"), help="Output file"
)
@click.option(
"-f",
"--format",
"output_format",
type=click.Choice(["json", "txt", "xml"]),
default="txt",
help="Output format",
)
@click.option("-v", "--verbose", count=True, help="Verbose output")
@pass_api_client
@click.pass_context
@echo_result
@handle_exceptions
@functools.wraps(function)
def wrapper(*args, **kwargs):
return function(*args, **kwargs)
return wrapper
def ip_lookup_command(function):
"""Decorator that groups decorators common to ip and quick subcommand."""
@click.command()
@click.argument("ip_address", callback=ip_addresses_parameter, nargs=-1)
@click.option("-k", "--api-key", help="Key to include in API requests")
@click.option(
"-O",
"--offering",
help="Which API offering to use, enterprise or community, "
"defaults to enterprise",
)
@click.option("-i", "--input", "input_file", type=click.File(), help="Input file")
@click.option(
"-o", "--output", "output_file", type=click.File(mode="w"), help="Output file"
)
@click.option(
"-f",
"--format",
"output_format",
type=click.Choice(["json", "txt", "xml"]),
default="txt",
help="Output format",
)
@pass_api_client
@click.pass_context
@echo_result
@handle_exceptions
@functools.wraps(function)
def wrapper(*args, **kwargs):
return function(*args, **kwargs)
return wrapper
class SubcommandNotImplemented(click.ClickException):
"""Exception used temporarily for subcommands that have not been implemented.
:param subcommand_name: Name of the subcommand to display in the error message.
    :type subcommand_name: str
"""
def __init__(self, subcommand_name):
message = "{!r} subcommand is not implemented yet.".format(subcommand_name)
super(SubcommandNotImplemented, self).__init__(message)
def not_implemented_command(function):
"""Decorator that sends requests for not implemented commands."""
@click.command()
@pass_api_client
@functools.wraps(function)
def wrapper(api_client, *args, **kwargs):
command_name = function.__name__
try:
api_client.not_implemented(command_name)
except RequestFailure:
raise SubcommandNotImplemented(command_name)
return wrapper
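# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how a subcommand
# might compose the decorators defined above. The subcommand body and the
# api_client.query() call are assumptions shown for demonstration only.
#
# @gnql_command
# def query(context, api_client, **kwargs):
#     """Run a GNQL query and return the raw API result (hypothetical)."""
#     return api_client.query(kwargs["query"])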
|
model/__init__.py | leoriohope/RandWireNN | 757 | 11074435 | <reponame>leoriohope/RandWireNN
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 8 08:22:37 2019
@author: Michael
"""
|
setup.py | awaisbinadil/colour-checker-detection | 116 | 11074438 | # -*- coding: utf-8 -*-
import codecs
from setuptools import setup
packages = \
['colour_checker_detection',
'colour_checker_detection.detection',
'colour_checker_detection.detection.tests']
package_data = \
{'': ['*'],
'colour_checker_detection': ['examples/*',
'resources/colour-checker-detection-examples-datasets/*',
'resources/colour-checker-detection-tests-datasets/*']}
install_requires = \
['colour-science>=0.3.16,<0.4.0', 'opencv-python>=4,<5']
extras_require = \
{'development': ['biblib-simple',
'coverage',
'coveralls',
'flake8',
'invoke',
'jupyter',
'matplotlib',
'mock',
'nose',
'pre-commit',
'pytest',
'restructuredtext-lint',
'sphinx<=3.1.2',
'sphinx_rtd_theme',
'sphinxcontrib-bibtex',
'toml',
'twine',
'yapf==0.23'],
'read-the-docs': ['mock', 'numpy', 'sphinxcontrib-bibtex']}
setup(
name='colour-checker-detection',
version='0.1.2',
description='Colour checker detection with Python',
long_description=codecs.open('README.rst', encoding='utf8').read(),
author='<NAME>',
author_email='<EMAIL>',
maintainer='Colour Developers',
maintainer_email='<EMAIL>',
url='https://www.colour-science.org/',
packages=packages,
package_data=package_data,
install_requires=install_requires,
extras_require=extras_require,
python_requires='>=3.6,<4.0',
)
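# Editorial note (not part of the original setup.py): given the extras defined
# above, the optional development dependencies would typically be installed
# with something like:
#
#     pip install -e ".[development]"
#
# The exact invocation is an assumption and depends on the local environment.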
|
pose_trackers/lighttrack/graph/torchlight/torchlight/__init__.py | rcourivaud/video-to-pose3D | 574 | 11074445 | <filename>pose_trackers/lighttrack/graph/torchlight/torchlight/__init__.py
from .gpu import ngpu
from .gpu import occupy_gpu
from .gpu import visible_gpu
from .io import DictAction
from .io import IO
from .io import import_class
from .io import str2bool
from .io import str2dict
|
scrounger/utils/config.py | NORD-Function/IOS-tools | 217 | 11074494 | <reponame>NORD-Function/IOS-tools
"""
Module with scrounger configurations
"""
# get home directory
from os import getenv
# Logging
import logging as _logging
#_LOGGING_FORMAT = "%(asctime)17s - %(module)8s.%(funcName).10s : %(message)s"
_LOGGING_FORMAT = "%(asctime)17s - %(module)30s : %(message)s"
_LOGGING_TIME_FORMAT = "%Y-%m-%d %H:%M:%S"
_formatter = _logging.Formatter(_LOGGING_FORMAT, _LOGGING_TIME_FORMAT)
_handler = _logging.StreamHandler()
_handler.setFormatter(_formatter)
Log = _logging.getLogger("scrounger")
"""
Variable to be used when logging is necessary
"""
Log.addHandler(_handler)
Log.setLevel(_logging.INFO)
#Log.setLevel(_logging.DEBUG)
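# Illustrative usage (not part of the original module): other scrounger
# modules would typically import this logger, for example:
#
#     from scrounger.utils.config import Log
#     Log.info("Starting analysis of %s", "example.apk")
#
# The message and file name above are placeholders.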
# Binary found memory
binary_memory = {
"binary": [],
"ios": {},
"ios_packages": {},
"android": {},
}
# Constants
SSH_SESSION_TIMEOUT = 60*5 # 5 minutes
SSH_COMMAND_TIMEOUT = 30 # 30 seconds
_BANNER = """
_____
/ ____|
| (___ ___ _ __ ___ _ _ _ __ __ _ ___ _ __
\___ \ / __| '__/ _ \| | | | '_ \ / _` |/ _ \ '__|
____) | (__| | | (_) | |_| | | | | (_| | __/ |
|_____/ \___|_| \___/ \__,_|_| |_|\__, |\___|_|
__/ |
|___/
"""
_VERSION = "0.2.0"
_HOME = getenv('HOME')
_SCROUNGER_HOME_NAME = ".scrounger"
_SCROUNGER_HOME = "{}/{}".format(_HOME, _SCROUNGER_HOME_NAME)
_HISTORY_FILE = "{}/history".format(_SCROUNGER_HOME)
_SESSION_FILE = "{}/sessions".format(_SCROUNGER_HOME)
_MAX_HISTORY = 1000
_CERT_PATH = "{}/certificates".format(_SCROUNGER_HOME) |