id (stringlengths 2-8) | text (stringlengths 16-264k) | dataset_id (stringclasses: 1 value)
---|---|---|
3262699
|
<gh_stars>10-100
import unittest
import collections
from fontParts.base import FontPartsError
class TestGuideline(unittest.TestCase):
def getGuideline_generic(self):
guideline, _ = self.objectGenerator("guideline")
guideline.x = 1
guideline.y = 2
guideline.angle = 90
guideline.name = "Test Guideline"
return guideline
def getGuideline_fontGuideline(self):
font, _ = self.objectGenerator("font")
guideline = font.appendGuideline((1, 2), 90, "Test Guideline Font")
return guideline
def getGuideline_glyphGuideline(self):
font, _ = self.objectGenerator("font")
layer = font.newLayer("L")
glyph = layer.newGlyph("X")
guideline = glyph.appendGuideline((1, 2), 90, "Test Guideline Glyph")
return guideline
# ----
# repr
# ----
def test_reprContents(self):
guideline = self.getGuideline_generic()
value = guideline._reprContents()
self.assertIsInstance(value, list)
for i in value:
self.assertIsInstance(i, str)
def test_reprContents_noGlyph(self):
guideline, _ = self.objectGenerator("guideline")
value = guideline._reprContents()
self.assertIsInstance(value, list)
for i in value:
self.assertIsInstance(i, str)
def test_reprContents_Layer(self):
guideline = self.getGuideline_glyphGuideline()
value = guideline._reprContents()
self.assertIsInstance(value, list)
for i in value:
self.assertIsInstance(i, str)
# --------
# Attributes
# --------
# x
def test_x_get_generic(self):
guideline = self.getGuideline_generic()
self.assertEqual(
guideline.x,
1
)
def test_x_get_fontGuideline(self):
guideline = self.getGuideline_fontGuideline()
self.assertEqual(
guideline.x,
1
)
def test_x_get_glyphGuideline(self):
guideline = self.getGuideline_glyphGuideline()
self.assertEqual(
guideline.x,
1
)
def test_x_set_valid_zero_generic(self):
guideline = self.getGuideline_generic()
guideline.x = 0
self.assertEqual(
guideline.x,
0
)
def test_x_set_valid_zero_fontGuideline(self):
guideline = self.getGuideline_fontGuideline()
guideline.x = 0
self.assertEqual(
guideline.x,
0
)
def test_x_set_valid_zero_glyphGuideline(self):
guideline = self.getGuideline_glyphGuideline()
guideline.x = 0
self.assertEqual(
guideline.x,
0
)
def test_x_set_valid_positive(self):
guideline = self.getGuideline_generic()
guideline.x = 1
self.assertEqual(
guideline.x,
1
)
def test_x_set_valid_negative(self):
guideline = self.getGuideline_generic()
guideline.x = -1
self.assertEqual(
guideline.x,
-1
)
def test_x_set_valid_positive_float(self):
guideline = self.getGuideline_generic()
guideline.x = 1.1
self.assertEqual(
guideline.x,
1.1
)
def test_x_set_valid_negative_float(self):
guideline = self.getGuideline_generic()
guideline.x = -1.1
self.assertEqual(
guideline.x,
-1.1
)
def test_x_set_valid_None(self):
guideline = self.getGuideline_generic()
guideline.x = None
self.assertEqual(
guideline.x,
0
)
def test_x_set_invalid_string(self):
guideline = self.getGuideline_generic()
with self.assertRaises(TypeError):
guideline.x = "ABC"
# y
def test_y_get_generic(self):
guideline = self.getGuideline_generic()
self.assertEqual(
guideline.y,
2
)
def test_y_get_fontGuideline(self):
guideline = self.getGuideline_fontGuideline()
self.assertEqual(
guideline.y,
2
)
def test_y_get_glyphGuideline(self):
guideline = self.getGuideline_glyphGuideline()
self.assertEqual(
guideline.y,
2
)
def test_y_set_valid_zero_generic(self):
guideline = self.getGuideline_generic()
guideline.y = 0
self.assertEqual(
guideline.y,
0
)
def test_y_set_valid_zero_fontGuideline(self):
guideline = self.getGuideline_fontGuideline()
guideline.y = 0
self.assertEqual(
guideline.y,
0
)
def test_y_set_valid_zero_glyphGuideline(self):
guideline = self.getGuideline_glyphGuideline()
guideline.y = 0
self.assertEqual(
guideline.y,
0
)
def test_y_set_valid_positive(self):
guideline = self.getGuideline_generic()
guideline.y = 1
self.assertEqual(
guideline.y,
1
)
def test_y_set_valid_negative(self):
guideline = self.getGuideline_generic()
guideline.y = -1
self.assertEqual(
guideline.y,
-1
)
def test_y_set_valid_positive_float(self):
guideline = self.getGuideline_generic()
guideline.y = 1.1
self.assertEqual(
guideline.y,
1.1
)
def test_y_set_valid_negative_float(self):
guideline = self.getGuideline_generic()
guideline.y = -1.1
self.assertEqual(
guideline.y,
-1.1
)
def test_y_set_valid_None(self):
guideline = self.getGuideline_generic()
guideline.y = None
self.assertEqual(
guideline.y,
0
)
def test_y_set_invalid_string(self):
guideline = self.getGuideline_generic()
with self.assertRaises(TypeError):
guideline.y = "ABC"
# angle
def test_angle_get_generic(self):
guideline = self.getGuideline_generic()
self.assertEqual(
guideline.angle,
90
)
def test_angle_get_fontGuideline(self):
guideline = self.getGuideline_fontGuideline()
self.assertEqual(
guideline.angle,
90
)
def test_angle_get_glyphGuideline(self):
guideline = self.getGuideline_glyphGuideline()
self.assertEqual(
guideline.angle,
90
)
def test_angle_set_valid_zero_generic(self):
guideline = self.getGuideline_generic()
guideline.angle = 0
self.assertEqual(
guideline.angle,
0
)
def test_angle_set_valid_zero_fontGuideline(self):
guideline = self.getGuideline_fontGuideline()
guideline.angle = 0
self.assertEqual(
guideline.angle,
0
)
def test_angle_set_valid_zero_glyphGuideline(self):
guideline = self.getGuideline_glyphGuideline()
guideline.angle = 0
self.assertEqual(
guideline.angle,
0
)
def test_angle_set_valid_positive(self):
guideline = self.getGuideline_generic()
guideline.angle = 10
self.assertEqual(
guideline.angle,
10
)
def test_angle_set_valid_negative(self):
guideline = self.getGuideline_generic()
guideline.angle = -10
self.assertEqual(
guideline.angle,
350
)
def test_angle_set_valid_positive_float(self):
guideline = self.getGuideline_generic()
guideline.angle = 10.1
self.assertEqual(
guideline.angle,
10.1
)
def test_angle_set_valid_negative_float(self):
guideline = self.getGuideline_generic()
guideline.angle = -10.1
self.assertEqual(
guideline.angle,
349.9
)
def test_angle_set_valid_positive_edge(self):
guideline = self.getGuideline_generic()
guideline.angle = 360
self.assertEqual(
guideline.angle,
360
)
def test_angle_set_valid_negative_edge(self):
guideline = self.getGuideline_generic()
guideline.angle = -360
self.assertEqual(
guideline.angle,
0
)
def test_angle_set_valid_None(self):
guideline = self.getGuideline_generic()
guideline.angle = None
self.assertEqual(
guideline.angle,
0
)
def test_angle_set_invalid_positive_edge(self):
guideline = self.getGuideline_generic()
with self.assertRaises(ValueError):
guideline.angle = 361
def test_angle_set_invalid_negative_edge(self):
guideline = self.getGuideline_generic()
with self.assertRaises(ValueError):
guideline.angle = -361
def test_angle_set_invalid_string(self):
guideline = self.getGuideline_generic()
with self.assertRaises(TypeError):
guideline.angle = "ABC"
def test_angle_set_valid_none_x0_y0(self):
guideline = self.getGuideline_generic()
guideline.x = 0
guideline.y = 0
guideline.angle = None
self.assertEqual(
guideline.angle,
0
)
def test_angle_set_valid_none_x1_y0(self):
guideline = self.getGuideline_generic()
guideline.x = 1
guideline.y = 0
guideline.angle = None
self.assertEqual(
guideline.angle,
90
)
def test_angle_set_valid_none_x0_y1(self):
guideline = self.getGuideline_generic()
guideline.x = 0
guideline.y = 1
guideline.angle = None
self.assertEqual(
guideline.angle,
0
)
def test_angle_set_valid_none_x1_y1(self):
guideline = self.getGuideline_generic()
guideline.x = 1
guideline.y = 1
guideline.angle = None
self.assertEqual(
guideline.angle,
0
)
# index
def getGuideline_index(self):
glyph, _ = self.objectGenerator("glyph")
glyph.appendGuideline((0, 0), 90, "guideline 0")
glyph.appendGuideline((0, 0), 90, "guideline 1")
glyph.appendGuideline((0, 0), 90, "guideline 2")
return glyph
def test_get_index_noParent(self):
guideline, _ = self.objectGenerator("guideline")
self.assertIsNone(guideline.index)
def test_get_index(self):
glyph = self.getGuideline_index()
for i, guideline in enumerate(glyph.guidelines):
self.assertEqual(guideline.index, i)
def test_set_index_noParent(self):
guideline, _ = self.objectGenerator("guideline")
with self.assertRaises(FontPartsError):
guideline.index = 1
def test_set_index_positive(self):
glyph = self.getGuideline_index()
guideline = glyph.guidelines[0]
with self.assertRaises(FontPartsError):
guideline.index = 2
def test_set_index_negative(self):
glyph = self.getGuideline_index()
guideline = glyph.guidelines[1]
with self.assertRaises(FontPartsError):
guideline.index = -1
# name
def test_name_get_none(self):
guideline, _ = self.objectGenerator("guideline")
self.assertIsNone(guideline.name)
def test_name_set_valid(self):
guideline = self.getGuideline_generic()
guideline.name = u"foo"
self.assertEqual(guideline.name, u"foo")
def test_name_set_none(self):
guideline = self.getGuideline_generic()
guideline.name = None
self.assertIsNone(guideline.name)
def test_name_set_invalid(self):
guideline = self.getGuideline_generic()
with self.assertRaises(TypeError):
guideline.name = 123
# color
def test_color_get_none(self):
guideline = self.getGuideline_generic()
self.assertIsNone(guideline.color)
def test_color_set_valid_max(self):
guideline = self.getGuideline_generic()
guideline.color = (1, 1, 1, 1)
self.assertEqual(guideline.color, (1, 1, 1, 1))
def test_color_set_valid_min(self):
guideline = self.getGuideline_generic()
guideline.color = (0, 0, 0, 0)
self.assertEqual(guideline.color, (0, 0, 0, 0))
def test_color_set_valid_decimal(self):
guideline = self.getGuideline_generic()
guideline.color = (0.1, 0.2, 0.3, 0.4)
self.assertEqual(guideline.color, (0.1, 0.2, 0.3, 0.4))
def test_color_set_none(self):
guideline = self.getGuideline_generic()
guideline.color = None
self.assertIsNone(guideline.color)
def test_color_set_invalid_over_max(self):
guideline = self.getGuideline_generic()
with self.assertRaises(ValueError):
guideline.color = (1.1, 0.2, 0.3, 0.4)
def test_color_set_invalid_under_min(self):
guideline = self.getGuideline_generic()
with self.assertRaises(ValueError):
guideline.color = (-0.1, 0.2, 0.3, 0.4)
def test_color_set_invalid_too_few(self):
guideline = self.getGuideline_generic()
with self.assertRaises(ValueError):
guideline.color = (0.1, 0.2, 0.3)
def test_color_set_invalid_string(self):
guideline = self.getGuideline_generic()
with self.assertRaises(TypeError):
guideline.color = "0.1,0.2,0.3,0.4"
def test_color_set_invalid_int(self):
guideline = self.getGuideline_generic()
with self.assertRaises(TypeError):
guideline.color = 123
# identifier
def test_identifier_get_none(self):
guideline = self.getGuideline_generic()
self.assertIsNone(guideline.identifier)
def test_identifier_generated_type(self):
guideline = self.getGuideline_generic()
guideline.getIdentifier()
self.assertIsInstance(guideline.identifier, str)
def test_identifier_consistency(self):
guideline = self.getGuideline_generic()
guideline.getIdentifier()
# get: twice to test consistency
self.assertEqual(guideline.identifier, guideline.identifier)
def test_identifier_cannot_set(self):
# identifier is a read-only property
guideline = self.getGuideline_generic()
with self.assertRaises(FontPartsError):
guideline.identifier = "ABC"
def test_identifier_force_set(self):
identifier = "ABC"
guideline = self.getGuideline_generic()
guideline._setIdentifier(identifier)
self.assertEqual(guideline.identifier, identifier)
# -------
# Methods
# -------
def getGuideline_copy(self):
guideline = self.getGuideline_generic()
guideline.name = "foo"
guideline.color = (0.1, 0.2, 0.3, 0.4)
return guideline
# copy
def test_copy_separate_objects(self):
guideline = self.getGuideline_copy()
copied = guideline.copy()
self.assertIsNot(guideline, copied)
def test_copy_same_name(self):
guideline = self.getGuideline_copy()
copied = guideline.copy()
self.assertEqual(guideline.name, copied.name)
def test_copy_same_color(self):
guideline = self.getGuideline_copy()
copied = guideline.copy()
self.assertEqual(guideline.color, copied.color)
def test_copy_same_identifier(self):
guideline = self.getGuideline_copy()
copied = guideline.copy()
self.assertEqual(guideline.identifier, copied.identifier)
def test_copy_generated_identifier_different(self):
guideline = self.getGuideline_copy()
copied = guideline.copy()
guideline.getIdentifier()
copied.getIdentifier()
self.assertNotEqual(guideline.identifier, copied.identifier)
def test_copy_same_x(self):
guideline = self.getGuideline_copy()
copied = guideline.copy()
self.assertEqual(guideline.x, copied.x)
def test_copy_same_y(self):
guideline = self.getGuideline_copy()
copied = guideline.copy()
self.assertEqual(guideline.y, copied.y)
def test_copy_same_angle(self):
guideline = self.getGuideline_copy()
copied = guideline.copy()
self.assertEqual(guideline.angle, copied.angle)
# transform
def getGuideline_transform(self):
guideline = self.getGuideline_generic()
guideline.angle = 45.0
return guideline
def test_transformBy_valid_no_origin(self):
guideline = self.getGuideline_transform()
guideline.transformBy((2, 0, 0, 3, -3, 2))
self.assertEqual(guideline.x, -1)
self.assertEqual(guideline.y, 8)
self.assertAlmostEqual(guideline.angle, 56.310, places=3)
def test_transformBy_valid_origin(self):
guideline = self.getGuideline_transform()
guideline.transformBy((2, 0, 0, 2, 0, 0), origin=(1, 2))
self.assertEqual(guideline.x, 1)
self.assertEqual(guideline.y, 2)
self.assertAlmostEqual(guideline.angle, 45.000, places=3)
def test_transformBy_invalid_one_string_value(self):
guideline = self.getGuideline_transform()
with self.assertRaises(TypeError):
guideline.transformBy((1, 0, 0, 1, 0, "0"))
def test_transformBy_invalid_all_string_values(self):
guideline = self.getGuideline_transform()
with self.assertRaises(TypeError):
guideline.transformBy("1, 0, 0, 1, 0, 0")
def test_transformBy_invalid_int_value(self):
guideline = self.getGuideline_transform()
with self.assertRaises(TypeError):
guideline.transformBy(123)
# moveBy
def test_moveBy_valid(self):
guideline = self.getGuideline_transform()
guideline.moveBy((-1, 2))
self.assertEqual(guideline.x, 0)
self.assertEqual(guideline.y, 4)
self.assertAlmostEqual(guideline.angle, 45.000, places=3)
def test_moveBy_invalid_one_string_value(self):
guideline = self.getGuideline_transform()
with self.assertRaises(TypeError):
guideline.moveBy((-1, "2"))
def test_moveBy_invalid_all_strings_value(self):
guideline = self.getGuideline_transform()
with self.assertRaises(TypeError):
guideline.moveBy("-1, 2")
def test_moveBy_invalid_int_value(self):
guideline = self.getGuideline_transform()
with self.assertRaises(TypeError):
guideline.moveBy(1)
# scaleBy
def test_scaleBy_valid_one_value_no_origin(self):
guideline = self.getGuideline_transform()
guideline.scaleBy((-2))
self.assertEqual(guideline.x, -2)
self.assertEqual(guideline.y, -4)
self.assertAlmostEqual(guideline.angle, 225.000, places=3)
def test_scaleBy_valid_two_values_no_origin(self):
guideline = self.getGuideline_transform()
guideline.scaleBy((-2, 3))
self.assertEqual(guideline.x, -2)
self.assertEqual(guideline.y, 6)
self.assertAlmostEqual(guideline.angle, 123.690, places=3)
def test_scaleBy_valid_two_values_origin(self):
guideline = self.getGuideline_transform()
guideline.scaleBy((-2, 3), origin=(1, 2))
self.assertEqual(guideline.x, 1)
self.assertEqual(guideline.y, 2)
self.assertAlmostEqual(guideline.angle, 123.690, places=3)
def test_scaleBy_invalid_one_string_value(self):
guideline = self.getGuideline_transform()
with self.assertRaises(TypeError):
guideline.scaleBy((-1, "2"))
def test_scaleBy_invalid_two_string_values(self):
guideline = self.getGuideline_transform()
with self.assertRaises(TypeError):
guideline.scaleBy("-1, 2")
def test_scaleBy_invalid_tuple_too_many_values(self):
guideline = self.getGuideline_transform()
with self.assertRaises(ValueError):
guideline.scaleBy((-1, 2, -3))
# rotateBy
def test_rotateBy_valid_no_origin(self):
guideline = self.getGuideline_transform()
guideline.rotateBy(45)
self.assertAlmostEqual(guideline.x, -0.707, places=3)
self.assertAlmostEqual(guideline.y, 2.121, places=3)
self.assertAlmostEqual(guideline.angle, 0.000, places=3)
def test_rotateBy_valid_origin(self):
guideline = self.getGuideline_transform()
guideline.rotateBy(45, origin=(1, 2))
self.assertAlmostEqual(guideline.x, 1)
self.assertAlmostEqual(guideline.y, 2)
self.assertAlmostEqual(guideline.angle, 0.000, places=3)
def test_rotateBy_invalid_string_value(self):
guideline = self.getGuideline_transform()
with self.assertRaises(TypeError):
guideline.rotateBy("45")
def test_rotateBy_invalid_too_large_value_positive(self):
guideline = self.getGuideline_transform()
with self.assertRaises(ValueError):
guideline.rotateBy(361)
def test_rotateBy_invalid_too_large_value_negative(self):
guideline = self.getGuideline_transform()
with self.assertRaises(ValueError):
guideline.rotateBy(-361)
# skewBy
def test_skewBy_valid_no_origin_one_value(self):
guideline = self.getGuideline_transform()
guideline.skewBy(100)
self.assertAlmostEqual(guideline.x, -10.343, places=3)
self.assertEqual(guideline.y, 2.0)
self.assertAlmostEqual(guideline.angle, 8.525, places=3)
def test_skewBy_valid_no_origin_two_values(self):
guideline = self.getGuideline_transform()
guideline.skewBy((100, 200))
self.assertAlmostEqual(guideline.x, -10.343, places=3)
self.assertAlmostEqual(guideline.y, 2.364, places=3)
self.assertAlmostEqual(guideline.angle, 5.446, places=3)
def test_skewBy_valid_origin_one_value(self):
guideline = self.getGuideline_transform()
guideline.skewBy(100, origin=(1, 2))
self.assertEqual(guideline.x, 1)
self.assertEqual(guideline.y, 2)
self.assertAlmostEqual(guideline.angle, 8.525, places=3)
def test_skewBy_valid_origin_two_values(self):
guideline = self.getGuideline_transform()
guideline.skewBy((100, 200), origin=(1, 2))
self.assertEqual(guideline.x, 1)
self.assertEqual(guideline.y, 2)
self.assertAlmostEqual(guideline.angle, 5.446, places=3)
# -------------
# Normalization
# -------------
# round
def getGuideline_round(self):
guideline = self.getGuideline_generic()
guideline.x = 1.1
guideline.y = 2.5
guideline.angle = 45.5
return guideline
def test_round_close_to(self):
guideline = self.getGuideline_round()
guideline.round()
self.assertEqual(guideline.x, 1)
def test_round_at_half(self):
guideline = self.getGuideline_round()
guideline.round()
self.assertEqual(guideline.y, 3)
def test_round_angle(self):
guideline = self.getGuideline_round()
guideline.round()
self.assertEqual(guideline.angle, 45.5)
# ----
# Hash
# ----
def test_hash_object_self(self):
guideline_one = self.getGuideline_generic()
self.assertEqual(
hash(guideline_one),
hash(guideline_one)
)
def test_hash_object_other(self):
guideline_one = self.getGuideline_generic()
guideline_two = self.getGuideline_generic()
self.assertNotEqual(
hash(guideline_one),
hash(guideline_two)
)
def test_hash_object_self_variable_assignment(self):
guideline_one = self.getGuideline_generic()
a = guideline_one
self.assertEqual(
hash(guideline_one),
hash(a)
)
def test_hash_object_other_variable_assignment(self):
guideline_one = self.getGuideline_generic()
guideline_two = self.getGuideline_generic()
a = guideline_one
self.assertNotEqual(
hash(guideline_two),
hash(a)
)
def test_is_hashable(self):
guideline_one = self.getGuideline_generic()
self.assertTrue(
isinstance(guideline_one, collections.abc.Hashable)
)
# -------
# Parents
# -------
def test_get_parent_font(self):
font, _ = self.objectGenerator("font")
layer = font.newLayer("L")
glyph = layer.newGlyph("X")
guideline = glyph.appendGuideline((0, 0), 90, "Test Guideline")
self.assertIsNotNone(guideline.font)
self.assertEqual(
guideline.font,
font
)
def test_get_parent_noFont(self):
layer, _ = self.objectGenerator("layer")
glyph = layer.newGlyph("X")
guideline = glyph.appendGuideline((0, 0), 90, "Test Guideline")
self.assertIsNone(guideline.font)
def test_get_parent_layer(self):
layer, _ = self.objectGenerator("layer")
glyph = layer.newGlyph("X")
guideline = glyph.appendGuideline((0, 0), 90, "Test Guideline")
self.assertIsNotNone(guideline.layer)
self.assertEqual(
guideline.layer,
layer
)
def test_get_parent_noLayer(self):
glyph, _ = self.objectGenerator("glyph")
guideline = glyph.appendGuideline((0, 0), 90, "Test Guideline")
self.assertIsNone(guideline.font)
self.assertIsNone(guideline.layer)
def test_get_parent_glyph(self):
glyph, _ = self.objectGenerator("glyph")
guideline = glyph.appendGuideline((0, 0), 90, "Test Guideline")
self.assertIsNotNone(guideline.glyph)
self.assertEqual(
guideline.glyph,
glyph
)
def test_get_parent_noGlyph(self):
guideline, _ = self.objectGenerator("guideline")
self.assertIsNone(guideline.font)
self.assertIsNone(guideline.layer)
self.assertIsNone(guideline.glyph)
def test_set_parent_glyph(self):
glyph, _ = self.objectGenerator("glyph")
guideline = self.getGuideline_generic()
guideline.glyph = glyph
self.assertIsNotNone(guideline.glyph)
self.assertEqual(
guideline.glyph,
glyph
)
def test_set_parent_glyph_none(self):
guideline, _ = self.objectGenerator("guideline")
guideline.glyph = None
self.assertIsNone(guideline.glyph)
def test_set_parent_font_none(self):
guideline, _ = self.objectGenerator("guideline")
guideline.font = None
self.assertIsNone(guideline.glyph)
def test_set_parent_glyph_exists(self):
glyph, _ = self.objectGenerator("glyph")
otherGlyph, _ = self.objectGenerator("glyph")
guideline = glyph.appendGuideline((0, 0), 90, "Test Guideline")
with self.assertRaises(AssertionError):
guideline.glyph = otherGlyph
def test_set_parent_glyph_font_exists(self):
guideline = self.getGuideline_fontGuideline()
glyph, _ = self.objectGenerator("glyph")
with self.assertRaises(AssertionError):
guideline.glyph = glyph
def test_set_parent_font_font_exists(self):
guideline = self.getGuideline_fontGuideline()
font, _ = self.objectGenerator("font")
with self.assertRaises(AssertionError):
guideline.font = font
def test_set_parent_font_glyph_exists(self):
guideline = self.getGuideline_glyphGuideline()
font, _ = self.objectGenerator("font")
with self.assertRaises(AssertionError):
guideline.font = font
# --------
# Equality
# --------
def test_object_equal_self(self):
guideline_one = self.getGuideline_generic()
self.assertEqual(
guideline_one,
guideline_one
)
def test_object_not_equal_other(self):
guideline_one = self.getGuideline_generic()
guideline_two = self.getGuideline_generic()
self.assertNotEqual(
guideline_one,
guideline_two
)
def test_object_equal_self_variable_assignment(self):
guideline_one = self.getGuideline_generic()
a = guideline_one
a.x = 200
self.assertEqual(
guideline_one,
a
)
def test_object_not_equal_other_variable_assignment(self):
guideline_one = self.getGuideline_generic()
guideline_two = self.getGuideline_generic()
a = guideline_one
self.assertNotEqual(
guideline_two,
a
)
# ---------
# Selection
# ---------
def test_selected_true(self):
guideline = self.getGuideline_generic()
try:
guideline.selected = False
except NotImplementedError:
return
guideline.selected = True
self.assertEqual(
guideline.selected,
True
)
def test_not_selected_false(self):
guideline = self.getGuideline_generic()
try:
guideline.selected = False
except NotImplementedError:
return
self.assertEqual(
guideline.selected,
False
)
|
StarcoderdataPython
|
278844
|
<reponame>aheck/reflectrpc<gh_stars>10-100
from __future__ import unicode_literals, print_function
from builtins import bytes, dict, list, int, float, str
import json
import sys
from cmd import Cmd
from reflectrpc.client import RpcClient
from reflectrpc.client import RpcError
import reflectrpc
import reflectrpc.cmdline
def print_types(types):
for t in types:
if t['type'] == 'enum':
print('enum: %s' % (t['name']))
print('Description: %s' % (t['description']))
for value in t['values']:
print(' [%d] %s - %s' % (value['intvalue'], value['name'], value['description']))
elif t['type'] == 'hash':
print('hash: %s' % (t['name']))
print('Description: %s' % (t['description']))
for field in t['fields']:
print(' [%s] %s - %s' % (field['type'], field['name'], field['description']))
else:
print('Unknown class of custom type: %s' % (t['type']))
def print_functions(functions):
for func_desc in functions:
paramlist = [param['name'] for param in func_desc['params']]
paramlist = ', '.join(paramlist)
print("%s(%s) - %s" % (func_desc['name'], paramlist, func_desc['description']))
for param in func_desc['params']:
print(" [%s] %s - %s" % (param['type'], param['name'], param['description']))
print(" Result: %s - %s" % (func_desc['result_type'], func_desc['result_desc']))
def split_exec_line(line):
tokens = []
curtoken = ''
intoken = False
instring = False
lastc = ''
arraylevel = 0
hashlevel = 0
for c in line:
if c.isspace():
if not intoken:
lastc = c
continue
# end of token?
if not arraylevel and not hashlevel and not instring:
tokens.append(curtoken.strip())
curtoken = ''
intoken = False
else:
intoken = True
if intoken:
curtoken += c
if c == '"':
if lastc != '\\':
instring = not instring
elif c == '[':
if not instring:
arraylevel += 1
elif c == ']':
if not instring:
arraylevel -= 1
elif c == '{':
if not instring:
hashlevel += 1
elif c == '}':
if not instring:
hashlevel -= 1
lastc = c
if len(curtoken.strip()):
tokens.append(curtoken.strip())
# type casting
itertokens = iter(tokens)
next(itertokens) # skip first token which is the method name
for i, t in enumerate(itertokens):
i += 1
try:
tokens[i] = json.loads(t)
except ValueError as e:
print("Invalid JSON in parameter %i:" % (i))
print("'%s'" % (t))
return None
return tokens
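# Illustrative behaviour (inferred from the implementation above, not part of
# the original source): the first token is kept verbatim as the method name and
# every following token is decoded as JSON, e.g.
#     split_exec_line('add 4 8')                  -> ['add', 4, 8]
#     split_exec_line('echo "Hello RPC server"')  -> ['echo', 'Hello RPC server']
# Invalid JSON in any parameter makes the function print an error and return None.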
class ReflectRpcShell(Cmd):
def __init__(self, client):
if issubclass(Cmd, object):
super().__init__()
else:
Cmd.__init__(self)
self.client = client
def connect(self):
self.client.enable_auto_reconnect()
try:
self.retrieve_service_description()
self.retrieve_functions()
self.retrieve_custom_types()
except reflectrpc.client.NetworkError as e:
print(e, file=sys.stderr)
print('', file=sys.stderr)
reflectrpc.cmdline.connection_failed_error(self.client.host,
self.client.port, True)
except reflectrpc.client.HttpException as e:
if e.status == '401':
print('Authentication failed\n', file=sys.stderr)
reflectrpc.cmdline.connection_failed_error(self.client.host,
self.client.port, True)
raise e
self.prompt = '(rpc) '
if self.client.host.startswith('unix://'):
self.intro = "ReflectRPC Shell\n================\n\nType 'help' for available commands\n\nRPC server: %s" % (self.client.host)
else:
self.intro = "ReflectRPC Shell\n================\n\nType 'help' for available commands\n\nRPC server: %s:%i" % (self.client.host, self.client.port)
if self.service_description:
self.intro += "\n\nSelf-description of the Service:\n================================\n"
if self.service_description['name']:
self.intro += self.service_description['name']
if self.service_description['version']:
self.intro += " (%s)\n" % (self.service_description['version'])
if self.service_description['description']:
self.intro += self.service_description['description']
def retrieve_service_description(self):
self.service_description = ''
try:
self.service_description = self.client.rpc_call('__describe_service')
except RpcError:
pass
def retrieve_functions(self):
self.functions = []
try:
self.functions = self.client.rpc_call('__describe_functions')
except RpcError:
pass
def retrieve_custom_types(self):
self.custom_types = []
try:
self.custom_types = self.client.rpc_call('__describe_custom_types')
except RpcError:
pass
def complete_doc(self, text, line, start_index, end_index):
return self.function_completion(text, line)
def complete_exec(self, text, line, start_index, end_index):
return self.function_completion(text, line)
def complete_notify(self, text, line, start_index, end_index):
return self.function_completion(text, line)
def function_completion(self, text, line):
if len(line.split()) > 2:
return []
if len(line.split()) == 2 and text == '':
return []
result = [f['name'] for f in self.functions if f['name'].startswith(text)]
if len(result) == 1 and result[0] == text:
return []
return result
def complete_type(self, text, line, start_index, end_index):
if len(line.split()) > 2:
return []
if len(line.split()) == 2 and text == '':
return []
result = [t['name'] for t in self.custom_types if t['name'].startswith(text)]
if len(result) == 1 and result[0] == text:
return []
return result
def do_help(self, line):
if not line:
print("list - List all RPC functions advertised by the server")
print("doc - Show the documentation of a RPC function")
print("type - Show the documentation of a custom RPC type")
print("types - List all custom RPC types advertised by the server")
print("exec - Execute an RPC call")
print("notify - Execute an RPC call but tell the server to send no response")
print("raw - Directly send a raw JSON-RPC message to the server")
print("quit - Quit this program")
print("help - Print this message. 'help [command]' prints a")
print(" detailed help message for a command")
return
if line == 'list':
print("List all RPC functions advertised by the server")
elif line == 'doc':
print("Show the documentation of an RPC function")
print("Example:")
print(" doc echo")
elif line == 'type':
print("Shos the documentation of a custom RPC type")
print("Example:")
print(" type PhoneType")
elif line == 'types':
print("List all custom RPC types advertised by the server")
elif line == 'exec':
print("Execute an RPC call")
print("Examples:")
print(" exec echo \"Hello RPC server\"")
print(" exec add 4 8")
elif line == 'notify':
print("Execute an RPC call but tell the server to send no response")
print("Example:")
print(" notify rpc_function")
elif line == 'raw':
print("Directly send a raw JSON-RPC message to the server")
print("Example:")
print(' raw {"method": "echo", "params": ["Hello Server"], "id": 1}')
elif line == 'quit':
print("Quit this program")
elif line == 'help':
pass
else:
print("No help available for unknown command:", line)
def do_type(self, line):
if not line:
print("You have to pass the name of a custom RPC type: 'type [typename]'")
return
t = [t for t in self.custom_types if t['name'] == line]
if not t:
print("Unknown custom RPC type:", line)
print_types(t)
def do_types(self, line):
for t in self.custom_types:
print(t['name'])
def do_exec(self, line):
tokens = split_exec_line(line)
if not tokens:
return
method = tokens.pop(0)
try:
result = self.client.rpc_call(method, *tokens)
print("Server replied:", json.dumps(result, indent=4, sort_keys=True))
except RpcError as e:
print(e)
def do_notify(self, line):
tokens = split_exec_line(line)
if not tokens:
return
method = tokens.pop(0)
self.client.rpc_notify(method, *tokens)
def do_raw(self, line):
print(self.client.rpc_call_raw(line))
def do_doc(self, line):
if not line:
print("You have to pass the name of an RPC function: 'doc [function]'")
return
function = [func for func in self.functions if func['name'] == line]
if not function:
print("Unknown RPC function:", line)
print_functions(function)
def do_list(self, line):
for func in self.functions:
paramlist = [param['name'] for param in func['params']]
print("%s(%s)" % (func['name'], ', '.join(paramlist)))
def do_quit(self, line):
sys.exit(0)
def do_EOF(self, line):
sys.exit(0)
|
StarcoderdataPython
|
151450
|
#!/usr/bin/env python
#
# Copyright (C) 2017 ShadowMan
#
class FatalError(Exception):
pass
class FrameHeaderParseError(Exception):
pass
class ConnectClosed(Exception):
pass
class RequestError(Exception):
pass
class LoggerWarning(RuntimeWarning):
pass
class DeamonError(Exception):
pass
class SendDataPackError(Exception):
pass
class InvalidResponse(Exception):
pass
class ParameterError(Exception):
pass
def raise_parameter_error(name, except_type, got_val):
if not isinstance(got_val, except_type):
raise ParameterError(
'{name} expects {except_type}, got {got_type}'.format(
name=name,
except_type=except_type.__name__,
got_type=type(got_val).__name__))
class ExitWrite(Exception):
pass
class BroadcastError(Exception):
pass
class HttpVerifierError(Exception):
pass
class FrameVerifierError(Exception):
pass
class WSSCertificateFileNotFound(Exception):
pass
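if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): raise_parameter_error()
    # raises ParameterError when the given value does not match the expected type.
    try:
        raise_parameter_error('count', int, '3')
    except ParameterError as error:
        print(error)  # count expects int, got str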
|
StarcoderdataPython
|
12807077
|
<gh_stars>1-10
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Test the design_matrix utilities.
Note that the tests just check whether the produced data has the correct dimensions,
not whether the values are exact.
"""
import numpy as np
from os.path import join, dirname
from ..experimental_paradigm import (EventRelatedParadigm, BlockParadigm,
load_protocol_from_csv_file)
from ..design_matrix import (
dmtx_light, _convolve_regressors, DesignMatrix, dmtx_from_csv, make_dmtx)
from nose.tools import assert_true, assert_equal
from numpy.testing import assert_almost_equal
from ....testing import parametric
def basic_paradigm():
conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c2', 'c2', 'c2']
onsets = [30, 70, 100, 10, 30, 90, 30, 40, 60]
paradigm = EventRelatedParadigm(conditions, onsets)
return paradigm
def modulated_block_paradigm():
conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c2', 'c2', 'c2']
onsets = [30, 70, 100, 10, 30, 90, 30, 40, 60]
duration = 5 + 5 * np.random.rand(len(onsets))
values = 1 + np.random.rand(len(onsets))
paradigm = BlockParadigm(conditions, onsets, duration, values)
return paradigm
def modulated_event_paradigm():
conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c2', 'c2', 'c2']
onsets = [30, 70, 100, 10, 30, 90, 30, 40, 60]
values = 1 + np.random.rand(len(onsets))
paradigm = EventRelatedParadigm(conditions, onsets, values)
return paradigm
def block_paradigm():
conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c2', 'c2', 'c2']
onsets = [30, 70, 100, 10, 30, 90, 30, 40, 60]
duration = 5 * np.ones(9)
paradigm = BlockParadigm (conditions, onsets, duration)
return paradigm
def test_show_dmtx():
# test that the show code indeed (formally) runs
frametimes = np.linspace(0, 127 * 1.,128)
DM = make_dmtx(frametimes, drift_model='Polynomial', drift_order=3)
ax = DM.show()
assert (ax is not None)
def test_dmtx0():
# Test design matrix creation when no paradigm is provided
tr = 1.0
frametimes = np.linspace(0, 127 * tr,128)
X, names= dmtx_light(frametimes, drift_model='Polynomial',
drift_order=3)
print(names)
assert_true(len(names)==4)
def test_dmtx0b():
# Test design matrix creation when no paradigm is provided
tr = 1.0
frametimes = np.linspace(0, 127 * tr,128)
X, names= dmtx_light(frametimes, drift_model='Polynomial',
drift_order=3)
assert_almost_equal(X[:, 0], np.linspace(- 0.5, .5, 128))
def test_dmtx0c():
# test design matrix creation when regressors are provided manually
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
ax = np.random.randn(128, 4)
X, names= dmtx_light(frametimes, drift_model='Polynomial',
drift_order=3, add_regs=ax)
assert_almost_equal(X[:, 0], ax[:, 0])
def test_dmtx0d():
# test design matrix creation when regressors are provided manually
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
ax = np.random.randn(128, 4)
X, names= dmtx_light(frametimes, drift_model='Polynomial',
drift_order=3, add_regs=ax)
assert_true((len(names) == 8) & (X.shape[1] == 8))
def test_dmtx1():
# basic test based on basic_paradigm and canonical hrf
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = basic_paradigm()
hrf_model = 'Canonical'
X, names= dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='Polynomial', drift_order=3)
assert_true(len(names)==7)
def test_convolve_regressors():
# tests for convolve_regressors helper function
conditions = ['c0', 'c1']
onsets = [20, 40]
paradigm = EventRelatedParadigm(conditions, onsets)
# names not passed -> default names
frametimes = np.arange(100)
f, names = _convolve_regressors(paradigm, 'Canonical', frametimes)
assert_equal(names, ['c0', 'c1'])
def test_dmtx1b():
# idem test_dmtx1, but different test
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = basic_paradigm()
hrf_model = 'Canonical'
X, names= dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='Polynomial', drift_order=3)
print(np.shape(X))
assert_true(X.shape == (128, 7))
def test_dmtx1c():
# idem test_dmtx1, but different test
tr = 1.0
frametimes = np.linspace(0, 127 *tr, 128)
paradigm = basic_paradigm()
hrf_model = 'Canonical'
X,names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='Polynomial', drift_order=3)
assert_true((X[:, - 1] == 1).all())
def test_dmtx1d():
# idem test_dmtx1, but different test
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = basic_paradigm()
hrf_model = 'Canonical'
X,names= dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='Polynomial', drift_order=3)
assert_true((np.isnan(X) == 0).all())
def test_dmtx2():
# idem test_dmtx1 with a different drift term
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = basic_paradigm()
hrf_model = 'Canonical'
X, names= dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='Cosine', hfcut=63)
assert_true(len(names) == 8)
def test_dmtx3():
# idem test_dmtx1 with a different drift term
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = basic_paradigm()
hrf_model = 'Canonical'
X,names= dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='Blank')
print(names)
assert_true(len(names) == 4)
def test_dmtx4():
# idem test_dmtx1 with a different hrf model
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = basic_paradigm()
hrf_model = 'Canonical With Derivative'
X, names= dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='Polynomial', drift_order=3)
assert_true(len(names) == 10)
def test_dmtx5():
# idem test_dmtx1 with a block paradigm
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = block_paradigm()
hrf_model = 'Canonical'
X, names= dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='Polynomial', drift_order=3)
assert_true(len(names) == 7)
def test_dmtx6():
# idem test_dmtx1 with a block paradigm and the hrf derivative
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = block_paradigm()
hrf_model = 'Canonical With Derivative'
X, names= dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='Polynomial', drift_order=3)
assert_true(len(names) == 10)
def test_dmtx7():
# idem test_dmtx1, but odd paradigm
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
conditions = [0, 0, 0, 1, 1, 1, 3, 3, 3]
# no condition 'c2'
onsets = [30, 70, 100, 10, 30, 90, 30, 40, 60]
paradigm = EventRelatedParadigm(conditions, onsets)
hrf_model = 'Canonical'
X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='Polynomial', drift_order=3)
assert_true(len(names) == 7)
def test_dmtx8():
# basic test based on basic_paradigm and FIR
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = basic_paradigm()
hrf_model = 'FIR'
X, names= dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='Polynomial', drift_order=3)
assert_true(len(names) == 7)
def test_dmtx9():
# basic test based on basic_paradigm and FIR
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = basic_paradigm()
hrf_model = 'FIR'
X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='Polynomial', drift_order=3,
fir_delays=range(1, 5))
assert_true(len(names) == 16)
def test_dmtx10():
# Check that the first column of the FIR design matrix is OK
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = basic_paradigm()
hrf_model = 'FIR'
X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='Polynomial', drift_order=3,
fir_delays=range(1, 5))
onset = paradigm.onset[paradigm.con_id == 'c0'].astype(np.int)
assert_true(np.all((X[onset + 1, 0] == 1)))
def test_dmtx11():
# check that the second column of the FIR design matrix is OK indeed
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = basic_paradigm()
hrf_model = 'FIR'
X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='Polynomial', drift_order=3,
fir_delays=range(1, 5))
onset = paradigm.onset[paradigm.con_id == 'c0'].astype(np.int)
assert_true(np.all(X[onset + 3, 2] == 1))
def test_dmtx12():
# check that the 11th column of a FIR design matrix is indeed OK
tr = 1.0
frametimes = np.linspace(0, 127 * tr,128)
paradigm = basic_paradigm()
hrf_model = 'FIR'
X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='Polynomial', drift_order=3,
fir_delays=range(1, 5))
onset = paradigm.onset[paradigm.con_id == 'c2'].astype(np.int)
assert_true(np.all(X[onset + 4, 11] == 1))
def test_dmtx13():
# Check that the fir_duration is well taken into account
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = basic_paradigm()
hrf_model = 'FIR'
X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='Polynomial', drift_order=3,
fir_delays=range(1, 5))
onset = paradigm.onset[paradigm.con_id == 'c0'].astype(np.int)
assert_true(np.all(X[onset + 1, 0] == 1))
def test_dmtx14():
# Check that the first column of the FIR design matrix is OK after a 1/2
# time shift
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128) + tr / 2
paradigm = basic_paradigm()
hrf_model = 'FIR'
X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='Polynomial', drift_order=3,
fir_delays=range(1, 5))
onset = paradigm.onset[paradigm.con_id == 'c0'].astype(np.int)
assert_true(np.all(X[onset + 1, 0] == 1))
def test_dmtx15():
# basic test based on basic_paradigm, plus user supplied regressors
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = basic_paradigm()
hrf_model = 'Canonical'
ax = np.random.randn(128, 4)
X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='Polynomial', drift_order=3, add_regs=ax)
assert(len(names) == 11)
assert(X.shape[1] == 11)
def test_dmtx16():
# Check that additional regressors are put at the right place
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = basic_paradigm()
hrf_model = 'Canonical'
ax = np.random.randn(128, 4)
X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='Polynomial', drift_order=3, add_regs=ax)
assert_almost_equal(X[:, 3: 7], ax)
def test_dmtx17():
# Test the effect of scaling on the events
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = modulated_event_paradigm()
hrf_model = 'Canonical'
X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='Polynomial', drift_order=3)
ct = paradigm.onset[paradigm.con_id == 'c0'].astype(np.int) + 1
assert((X[ct, 0] > 0).all())
def test_dmtx18():
# Test the effect of scaling on the blocks
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = modulated_block_paradigm()
hrf_model = 'Canonical'
X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='Polynomial', drift_order=3)
ct = paradigm.onset[paradigm.con_id == 'c0'].astype(np.int) + 3
assert((X[ct, 0] > 0).all())
def test_dmtx19():
# Test the effect of scaling on a FIR model
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = modulated_event_paradigm()
hrf_model = 'FIR'
X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='Polynomial', drift_order=3,
fir_delays=range(1, 5))
idx = paradigm.onset[paradigm.con_id == 0].astype(np.int)
assert_true((X[idx + 1, 0] == X[idx + 2, 1]).all())
def test_csv_io():
# test the csv io on design matrices
from tempfile import mkdtemp
from os.path import join
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = modulated_event_paradigm()
DM = make_dmtx(frametimes, paradigm, hrf_model='Canonical',
drift_model='Polynomial', drift_order=3)
path = join(mkdtemp(), 'dmtx.csv')
DM.write_csv(path)
DM2 = dmtx_from_csv( path)
assert_almost_equal (DM.matrix, DM2.matrix)
assert_true (DM.names == DM2.names)
def test_spm_1():
# Check that the nipy design matrix is close enough to the SPM one
# (it cannot be identical, because the hrf shape is different)
tr = 1.0
frametimes = np.linspace(0, 99, 100)
conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c2', 'c2', 'c2']
onsets = [30, 50, 70, 10, 30, 80, 30, 40, 60]
hrf_model = 'Canonical'
paradigm = EventRelatedParadigm(conditions, onsets)
X1 = make_dmtx(frametimes, paradigm, drift_model='Blank')
spm_dmtx = np.load(join(dirname(__file__),'spm_dmtx.npz'))['arr_0']
assert ((spm_dmtx - X1.matrix) ** 2).sum() / (spm_dmtx ** 2).sum() < .1
def test_spm_2():
# Check that the nipy design matrix is close enough to the SPM one
# (it cannot be identical, because the hrf shape is different)
import os
tr = 1.0
frametimes = np.linspace(0, 99, 100)
conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c2', 'c2', 'c2']
onsets = [30, 50, 70, 10, 30, 80, 30, 40, 60]
duration = 10 * np.ones(9)
hrf_model = 'Canonical'
paradigm = BlockParadigm(conditions, onsets, duration)
X1 = make_dmtx(frametimes, paradigm, drift_model='Blank')
spm_dmtx = np.load(join(dirname(__file__),'spm_dmtx.npz'))['arr_1']
assert ((spm_dmtx - X1.matrix) ** 2).sum() / (spm_dmtx ** 2).sum() < .1
if __name__ == "__main__":
import nose
nose.run(argv=['', __file__])
|
StarcoderdataPython
|
179787
|
__all__ = [
'AutoInitAndCloseable',
'Disposable',
'NoReentrantContext',
'DisposableContext',
]
class AutoInitAndCloseable(object):
"""
Classes with :meth:`init()` to initialize its internal states, and also
:meth:`close()` to destroy these states. The :meth:`init()` method can
be repeatedly called, which will cause initialization only at the first
call. Thus other methods may always call :meth:`init()` at beginning,
which can bring auto-initialization to the class.
A context manager is implemented: :meth:`init()` is explicitly called
when entering the context, while :meth:`close()` is called when
exiting the context.
"""
_initialized = False
def _init(self):
"""Override this method to initialize the internal states."""
raise NotImplementedError()
def init(self):
"""Ensure the internal states are initialized."""
if not self._initialized:
self._init()
self._initialized = True
def __enter__(self):
"""Ensure the internal states are initialized."""
self.init()
return self
def _close(self):
"""Override this method to destroy the internal states."""
raise NotImplementedError()
def close(self):
"""Ensure the internal states are destroyed."""
if self._initialized:
try:
self._close()
finally:
self._initialized = False
def __exit__(self, exc_type, exc_val, exc_tb):
"""Cleanup the internal states."""
self.close()
class Disposable(object):
"""
Classes which can only be used once.
"""
_already_used = False
def _check_usage_and_set_used(self):
"""
Check the usage flag to ensure the object has not been used,
and then mark it as used.
"""
if self._already_used:
raise RuntimeError('Disposable object cannot be used twice: {!r}.'.
format(self))
self._already_used = True
class NoReentrantContext(object):
"""
Base class for contexts which are not reentrant (i.e., if there is
a context opened by ``__enter__``, and it has not called ``__exit__``,
the ``__enter__`` cannot be called again).
"""
_is_entered = False
def _enter(self):
"""
Enter the context. Subclasses should override this instead of
the true ``__enter__`` method.
"""
raise NotImplementedError()
def _exit(self, exc_type, exc_val, exc_tb):
"""
Exit the context. Subclasses should override this instead of
the true ``__exit__`` method.
"""
raise NotImplementedError()
def _require_entered(self):
"""
Require the context to be entered.
Raises:
RuntimeError: If the context is not entered.
"""
if not self._is_entered:
raise RuntimeError('Context is required be entered: {!r}.'.
format(self))
def __enter__(self):
if self._is_entered:
raise RuntimeError('Context is not reentrant: {!r}.'.
format(self))
ret = self._enter()
self._is_entered = True
return ret
def __exit__(self, exc_type, exc_val, exc_tb):
if self._is_entered:
self._is_entered = False
return self._exit(exc_type, exc_val, exc_tb)
class DisposableContext(NoReentrantContext):
"""
Base class for contexts which can only be entered once.
"""
_has_entered = False
def __enter__(self):
if self._has_entered:
raise RuntimeError(
'A disposable context cannot be entered twice: {!r}.'.
format(self))
ret = super(DisposableContext, self).__enter__()
self._has_entered = True
return ret
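if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): a subclass only
    # overrides _init()/_close(); init() is idempotent, and the context manager
    # calls init() on entry and close() on exit, as the docstrings above describe.
    class DummyResource(AutoInitAndCloseable):
        def _init(self):
            print('initialized')

        def _close(self):
            print('closed')

    with DummyResource() as resource:
        resource.init()  # no-op: the state is already initialized
    # output: "initialized" followed by "closed"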
|
StarcoderdataPython
|
1662361
|
<filename>dailyproblems/__main__.py<gh_stars>0
from .problems import Problems, Node
obj = Problems()
# print(obj.day_one([1, 2, 3, 5, 5], 10))
# print(obj.day_two([1, 2, 3, 4, 5]))
e1 = Node('Monday')
e2 = Node('Tuesday')
e3 = Node('Wednesday')
e4 = Node('Thursday')
e5 = Node('Friday')
e1.nextval = e2
e2.nextval = e3
e3.nextval = e4
e4.nextval = e5
e3.traverse()
obj.day_three()
|
StarcoderdataPython
|
4957743
|
# -*- coding: UTF-8 -*-
#
# Copyright 2016 Metamarkets Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import csv
import os
import pandas
import pytest
from pandas.testing import assert_frame_equal
from pydruid.query import Query, QueryBuilder
from pydruid.utils import aggregators, filters, having, postaggregator
def create_query_with_results():
query = Query({}, "timeseries")
query.result = [
{
"result": {"value1": 1, "value2": "㬓"},
"timestamp": "2015-01-01T00:00:00.000-05:00",
},
{
"result": {"value1": 2, "value2": "㬓"},
"timestamp": "2015-01-02T00:00:00.000-05:00",
},
]
return query
EXPECTED_RESULTS_PANDAS = [
{"timestamp": "2015-01-01T00:00:00.000-05:00", "value1": 1, "value2": "㬓"},
{"timestamp": "2015-01-02T00:00:00.000-05:00", "value1": 2, "value2": "㬓"},
]
def expected_results_csv_reader():
# csv.DictReader does not perform promotion to int64
expected_results = []
for element in EXPECTED_RESULTS_PANDAS:
modified_elem = element.copy()
modified_elem.update({"value1": str(modified_elem["value1"])})
expected_results.append(modified_elem)
return expected_results
class TestQueryBuilder:
def test_build_query(self):
# given
expected_query_dict = {
"queryType": None,
"dataSource": "things",
"aggregations": [{"fieldName": "thing", "name": "count", "type": "count"}],
"postAggregations": [
{
"fields": [
{"fieldName": "sum", "type": "fieldAccess"},
{"fieldName": "count", "type": "fieldAccess"},
],
"fn": "/",
"name": "avg",
"type": "arithmetic",
}
],
"pagingSpec": {"pagingIdentifies": {}, "threshold": 1},
"filter": {"dimension": "one", "type": "selector", "value": 1},
"having": {"aggregation": "sum", "type": "greaterThan", "value": 1},
"new_key": "value",
}
builder = QueryBuilder()
# when
query = builder.build_query(
None,
{
"datasource": "things",
"aggregations": {"count": aggregators.count("thing")},
"post_aggregations": {
"avg": (postaggregator.Field("sum") / postaggregator.Field("count"))
},
"paging_spec": {"pagingIdentifies": {}, "threshold": 1},
"filter": filters.Dimension("one") == 1,
"having": having.Aggregation("sum") > 1,
"new_key": "value",
},
)
# then
assert query.query_dict == expected_query_dict
def test_build_query_none_type(self):
# given
expected_query_dict = {
"queryType": None,
"dataSource": "things",
"aggregations": [{"fieldName": "thing", "name": "count", "type": "count"}],
"filter": {"dimension": "one", "type": "selector", "value": 1},
"having": {"aggregation": "sum", "type": "greaterThan", "value": 1},
"dimension": "dim1",
}
builder = QueryBuilder()
# when
builder_dict = {
"datasource": "things",
"aggregations": {"count": aggregators.count("thing")},
"filter": filters.Dimension("one") == 1,
"having": having.Aggregation("sum") > 1,
"dimension": "dim1",
}
query = builder.build_query(None, builder_dict)
# then
assert query.query_dict == expected_query_dict
# you should be able to pass `None` to dimension/having/filter
for v in ["dimension", "having", "filter"]:
expected_query_dict[v] = None
builder_dict[v] = None
query = builder.build_query(None, builder_dict)
assert query.query_dict == expected_query_dict
def test_validate_query(self):
# given
builder = QueryBuilder()
# when
builder.validate_query(None, ["validkey"], {"validkey": "value"})
# then
pytest.raises(
ValueError,
builder.validate_query,
*[None, ["validkey"], {"invalidkey": "value"}]
)
def test_union_datasource(self):
# Given
expected_query_dict = {"queryType": None, "dataSource": "things"}
builder = QueryBuilder()
# when
builder_dict = {"datasource": "things"}
query = builder.build_query(None, builder_dict)
# then
assert query.query_dict == expected_query_dict
# Given
expected_query_dict = {
"queryType": None,
"dataSource": {
"type": "union",
"dataSources": ["things", "others", "more"],
},
}
builder = QueryBuilder()
# when
builder_dict = {"datasource": ["things", "others", "more"]}
query = builder.build_query(None, builder_dict)
# then
assert query.query_dict == expected_query_dict
# Given: check that it rejects non-string items
builder = QueryBuilder()
builder_dict = {"datasource": ["things", 123]}
with pytest.raises(ValueError):
query = builder.build_query(None, builder_dict)
def test_build_subquery(self):
# given
expected_query_dict = {
"query": {
"queryType": "groupBy",
"dataSource": "things",
"aggregations": [
{"fieldName": "thing", "name": "count", "type": "count"}
],
"postAggregations": [
{
"fields": [
{"fieldName": "sum", "type": "fieldAccess"},
{"fieldName": "count", "type": "fieldAccess"},
],
"fn": "/",
"name": "avg",
"type": "arithmetic",
}
],
"filter": {"dimension": "one", "type": "selector", "value": 1},
"having": {"aggregation": "sum", "type": "greaterThan", "value": 1},
},
"type": "query",
}
builder = QueryBuilder()
# when
subquery_dict = builder.subquery(
{
"datasource": "things",
"aggregations": {"count": aggregators.count("thing")},
"post_aggregations": {
"avg": (postaggregator.Field("sum") / postaggregator.Field("count"))
},
"filter": filters.Dimension("one") == 1,
"having": having.Aggregation("sum") > 1,
}
)
# then
assert subquery_dict == expected_query_dict
class TestQuery:
def test_export_tsv(self, tmpdir):
query = create_query_with_results()
file_path = tmpdir.join("out.tsv")
query.export_tsv(str(file_path))
with open(str(file_path)) as tsv_file:
reader = csv.DictReader(tsv_file, delimiter="\t")
actual = [line for line in reader]
assert actual == expected_results_csv_reader()
def test_export_pandas(self):
query = create_query_with_results()
df = query.export_pandas()
expected_df = pandas.DataFrame(EXPECTED_RESULTS_PANDAS)
assert_frame_equal(df, expected_df, check_like=True)
query = Query({}, "timeseries")
df = query.export_pandas()
assert_frame_equal(df, pandas.DataFrame())
def test_query_acts_as_a_wrapper_for_raw_result(self):
# given
query = create_query_with_results()
# then
assert len(query) == 2
assert isinstance(query[0], dict)
assert isinstance(query[1], dict)
|
StarcoderdataPython
|
1685065
|
sequences = input().split("|")
numbers = [[int(el) for el in seq.split()] for seq in sequences]
numbers.reverse()
numbers = [str(number) for seq in numbers for number in seq]
print(" ".join(numbers))
|
StarcoderdataPython
|
216857
|
from .active_lives import ActiveLivesValEMD
from .disabled_lives import DisabledLivesProjEMD, DisabledLivesValEMD
|
StarcoderdataPython
|
6672169
|
<gh_stars>0
#!/usr/bin/env python3
"""
Module that constructs the CLI, handles configuration files initialization,
and sets up logging.
"""
import logging
import sys
import click
from hopla.cli.add.todo import todo
from hopla.cli.authenticate import authenticate
from hopla.cli.buy.enchanted_armoire import enchanted_armoire
from hopla.cli.cast import cast
from hopla.cli.complete import complete
from hopla.cli.config import config
from hopla.cli.feed import feed
from hopla.cli.feed_all import feed_all
from hopla.cli.get_group import get_group
from hopla.cli.get_user.auth import auth
from hopla.cli.get_user.info import info
from hopla.cli.get_user.inventory import inventory
from hopla.cli.get_user.stats import stats
from hopla.cli.groupcmds.add import add
from hopla.cli.groupcmds.api import api
from hopla.cli.groupcmds.buy import buy
from hopla.cli.groupcmds.get_user import get_user
from hopla.cli.groupcmds.hatch import hatch
from hopla.cli.groupcmds.set import set # pylint: disable=redefined-builtin
from hopla.cli.hatch.quest_egg import quest_egg
from hopla.cli.hatch.standard_egg import standard_egg
from hopla.cli.hatch_all import hatch_all
from hopla.cli.request import request
from hopla.cli.support_development import support_development
from hopla.cli.version import version
from hopla.hoplalib.common import GlobalConstants
from hopla.hoplalib.configuration import ConfigInitializer, ConfigurationFileParser
from hopla.hoplalib.hoplaversion import HoplaVersion
def setup_logging() -> logging.Logger:
"""Setup python logging for the entire hopla project"""
parsed_loglevel: str = ConfigurationFileParser().get_full_config_name(
"cmd_all.loglevel",
fallback="info"
)
loglevel_mapping = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR
}
# https://docs.python.org/3.8/howto/logging.html#logging-basic-tutorial
logging.basicConfig(
format="[%(levelname)s][%(filename)s|%(asctime)s] %(message)s",
level=loglevel_mapping[parsed_loglevel],
datefmt="%Y-%m-%dT%H:%M:%S"
)
return logging.getLogger(__package__)
log = setup_logging()
log.debug(f"start application with arguments: {sys.argv}")
HOPLA_CONTEXT_SETTINGS = dict(
help_option_names=["-h", "--help"], # add -h
auto_envvar_prefix=GlobalConstants.APPLICATION_NAME
)
@click.group(context_settings=HOPLA_CONTEXT_SETTINGS)
@click.version_option(version=HoplaVersion().semantic_version())
def hopla():
"""hopla - a command line interface (CLI) to interact with habitica.com"""
def organize_cli() -> None:
"""Attach the subgroups and subcommands to the top hopla group command"""
# pylint: disable=too-many-statements
# authenticate
hopla.add_command(authenticate)
# add
hopla.add_command(add)
add.add_command(todo)
# api
hopla.add_command(api)
# buy
hopla.add_command(buy)
buy.add_command(enchanted_armoire)
# cast
hopla.add_command(cast)
# complete
hopla.add_command(complete)
# config
hopla.add_command(config)
# feed
hopla.add_command(feed)
# feed-all
hopla.add_command(feed_all)
# get-group
hopla.add_command(get_group)
# get-user
hopla.add_command(get_user)
get_user.add_command(inventory)
get_user.add_command(stats)
get_user.add_command(info)
get_user.add_command(auth)
# hatch
hopla.add_command(hatch)
hatch.add_command(standard_egg)
hatch.add_command(quest_egg)
# hatch-all
hopla.add_command(hatch_all)
# request
hopla.add_command(request)
# support-development
hopla.add_command(support_development)
# version
hopla.add_command(version)
# set
hopla.add_command(set)
def init_hopla_config_files() -> None:
"""Setup the config file."""
had_to_create_new_config_file: bool = ConfigInitializer().initialize_before_running_cmds()
if had_to_create_new_config_file is True:
click.echo(f"Thank you for trying out {GlobalConstants.APPLICATION_NAME}")
click.echo(
"Bug reports, pull requests, and feature requests are welcomed over at: "
)
click.echo(GlobalConstants.ISSUE_URL)
def kickstart_hopla() -> None:
"""Setup the config files, organize the CLI, and call the base command group."""
init_hopla_config_files()
organize_cli()
hopla()
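# Hypothetical direct-run hook, added as a sketch: the installed package presumably
# wires kickstart_hopla up as its console-script entry point instead.
if __name__ == "__main__":
    kickstart_hopla()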
|
StarcoderdataPython
|
11341074
|
<gh_stars>1-10
import numpy as np
from sklearn.metrics import mean_squared_error
class MSE():
"""Class for partitioning based on MSE between the mean and measured
values of the sample group.
"""
def __init__(self):
pass
def __call__(self, X, X_l, X_r, y, y_l, y_r):
return self._calc(X, y) - self._calc(X_l, y_l) - self._calc(X_r, y_r)
def _calc(self, X, y):
return np.sum(np.power(y - np.mean(y), 2))
class MSE_by_model():
"""Class for partitioning based on MSE between the predicted values of
the regression model applied to the sample group and measured values.
"""
def __init__(self, model):
self.model = model
def __call__(self, X, X_l, X_r, y, y_l, y_r):
return self._calc(X, y) - self._calc(X_l, y_l) - self._calc(X_r, y_r)
def _calc(self, X, y):
self.model.fit(X, y)
return mean_squared_error(y, self.model.predict(X))
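# A minimal usage sketch (added for illustration; not part of the original module):
# score a hypothetical split of a toy regression target with the variance-reduction
# criterion defined above.
if __name__ == "__main__":
    X = np.arange(10).reshape(-1, 1)                       # 10 samples, 1 feature
    y = np.array([1., 1., 1., 1., 1., 5., 5., 5., 5., 5.])
    left, right = slice(0, 5), slice(5, 10)                # candidate split at index 5
    criterion = MSE()
    gain = criterion(X, X[left], X[right], y, y[left], y[right])
    print(gain)  # 40.0: the split removes all within-node variance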
|
StarcoderdataPython
|
1767608
|
<gh_stars>0
from .users import SignIn,SignUp, UsersApi
from .trips import (
TripApi,
TripsApi,
JoinTripApi,
UpdateCoordinatesAPI,
GetCoordinatesAPI,
)
def initialize_routes(api):
# Users API
api.add_resource(UsersApi, "/api/users")
api.add_resource(SignIn, "/api/users/sign-in/<username>/<password>")
api.add_resource(SignUp, "/api/users/sign-up/<username>/<password>")
#api.add_resource(Userget, "/api/users/<userID>")
# Trips API
api.add_resource(TripsApi, "/api/trips")
api.add_resource(TripApi, "/api/trips/<tripId>")
api.add_resource(JoinTripApi, "/api/trips/<tripId>/join")
api.add_resource(UpdateCoordinatesAPI, "/api/trips/<tripId>/update-coordinates")
api.add_resource(GetCoordinatesAPI, "/api/trips/<tripId>/get-coordinates")
|
StarcoderdataPython
|
9704796
|
class ViliParser:
spacing = 4
|
StarcoderdataPython
|
144359
|
<filename>tests/framework/unit_tests/TSA/testFourier.py
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This Module performs Unit Tests for the TSA.Fourier class.
It can not be considered part of the active code but of the regression test system
"""
import os
import sys
import copy
import numpy as np
# add RAVEN to path
frameworkDir = os.path.abspath(os.path.join(*([os.path.dirname(__file__)] + [os.pardir]*4 + ['framework'])))
if frameworkDir not in sys.path:
sys.path.append(frameworkDir)
from utils.utils import find_crow
find_crow(frameworkDir)
from utils import xmlUtils
from TSA import Fourier
plot = False
print('Module undergoing testing:')
print(Fourier)
print('')
results = {"pass":0,"fail":0}
def checkFloat(comment, value, expected, tol=1e-10, update=True):
"""
This method is aimed to compare two floats given a certain tolerance
@ In, comment, string, a comment printed out if it fails
@ In, value, float, the value to compare
@ In, expected, float, the expected value
@ In, tol, float, optional, the tolerance
@ In, update, bool, optional, if False then don't update results counter
@ Out, res, bool, True if same
"""
if np.isnan(value) and np.isnan(expected):
res = True
elif np.isnan(value) or np.isnan(expected):
res = False
else:
res = abs(value - expected) <= tol
if update:
if not res:
print("checking float",comment,'|',value,"!=",expected)
results["fail"] += 1
else:
results["pass"] += 1
return res
def checkTrue(comment, res, update=True):
"""
This method is a pass-through for consistency and updating
@ In, comment, string, a comment printed out if it fails
@ In, res, bool, the tested value
@ In, update, bool, optional, if False then don't update results counter
@ Out, res, bool, True if test
"""
if update:
if res:
results["pass"] += 1
else:
print("checking bool",comment,'|',res,'is not True!')
results["fail"] += 1
return res
def checkSame(comment, value, expected, update=True):
"""
This method is aimed to compare two identical things
@ In, comment, string, a comment printed out if it fails
@ In, value, float, the value to compare
@ In, expected, float, the expected value
@ In, update, bool, optional, if False then don't update results counter
@ Out, res, bool, True if same
"""
res = value == expected
if update:
if res:
results["pass"] += 1
else:
print("checking string",comment,'|',value,"!=",expected)
results["fail"] += 1
return res
def checkArray(comment, first, second, dtype, tol=1e-10, update=True):
"""
This method is aimed to compare two arrays
@ In, comment, string, a comment printed out if it fails
@ In, value, float, the value to compare
@ In, expected, float, the expected value
@ In, tol, float, optional, the tolerance
@ In, update, bool, optional, if False then don't update results counter
@ Out, res, bool, True if same
"""
res = True
if len(first) != len(second):
res = False
print("checking answer",comment,'|','lengths do not match:',len(first),len(second))
else:
for i in range(len(first)):
if dtype == float:
pres = checkFloat('',first[i],second[i],tol,update=False)
      elif dtype == str:  # 'unicode' no longer exists in Python 3
pres = checkSame('',first[i],second[i],update=False)
if not pres:
print('checking array',comment,'|','entry "{}" does not match: {} != {}'.format(i,first[i],second[i]))
res = False
if update:
if res:
results["pass"] += 1
else:
results["fail"] += 1
return res
def checkNone(comment, entry, update=True):
"""
Checks if entry is None.
@ In, comment, string, a comment printed out if it fails
@ In, entry, object, to test if against None
@ In, update, bool, optional, if False then don't update results counter
@ Out, res, bool, True if None
"""
res = entry is None
if update:
if res:
results["pass"] += 1
else:
print("checking answer",comment,'|','"{}" is not None!'.format(entry))
      results["fail"] += 1
  return res
def checkFails(comment, errstr, function, update=True, args=None, kwargs=None):
"""
Checks if expected error occurs
@ In, comment, string, a comment printed out if it fails
@ In, errstr, str, expected fail message
@ In, function, method, method to run to test for failure
@ In, update, bool, optional, if False then don't update results counter
@ In, args, list, arguments to pass to function
@ In, kwargs, dict, keyword arguments to pass to function
@ Out, res, bool, True if failed as expected
"""
print('Error testing ...')
if args is None:
args = []
if kwargs is None:
kwargs = {}
try:
function(*args,**kwargs)
res = False
msg = 'Function call did not error!'
except Exception as e:
res = checkSame('',e.args[0],errstr,update=False)
if not res:
msg = 'Unexpected error message. \n Received: "{}"\n Expected: "{}"'.format(e.args[0],errstr)
if update:
if res:
results["pass"] += 1
print(' ... end Error testing (PASSED)')
else:
print("checking error",comment,'|',msg)
results["fail"] += 1
print(' ... end Error testing (FAILED)')
print('')
return res
######################################
# CONSTRUCTION #
######################################
def createFourierXML(targets, periods):
xml = xmlUtils.newNode('Fourier', attrib={'target':','.join(targets)})
xml.append(xmlUtils.newNode('periods', text=','.join(str(k) for k in periods)))
return xml
def createFromXML(xml):
fourier = Fourier()
inputSpec = Fourier.getInputSpecification()()
inputSpec.parseNode(xml)
fourier.handleInput(inputSpec)
return fourier
def createFourier(targets, periods):
xml = createFourierXML(targets, periods)
fourier = createFromXML(xml)
return fourier
def createFourierSignal(amps, periods, phases, pivot, intercept=0, plot=False):
if plot:
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
signal = np.zeros(len(pivot)) + intercept
for k, period in enumerate(periods):
new = amps[k] * np.sin(2 * np.pi / period * pivot + phases[k])
if plot:
ax.plot(pivot, new, ':')
signal += new
if plot:
ax.plot(pivot, signal, 'k-')
plt.show()
return signal
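# (Added note) createFourierSignal builds
#   signal(t) = intercept + sum_k amps[k] * sin(2*pi / periods[k] * t + phases[k]),
# which is the model the Fourier characterization checks below are expected to recover.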
###################
# Simple #
###################
# generate signal
targets = ['A', 'B', 'C']
pivot = np.arange(100) / 10.
periods = [2, 5, 10]
amps = [0.5, 1, 2]
phasesA = [0, np.pi, 0]
signalA = createFourierSignal(amps, periods, phasesA, pivot, plot=plot)
phasesB = [np.pi, 0, np.pi/4]
signalB = createFourierSignal(amps, periods, phasesB, pivot, plot=plot)
phasesC = [np.pi, np.pi/4, -np.pi/4]
interceptC = 2
signalC = createFourierSignal(amps, periods, phasesC, pivot, intercept=interceptC, plot=plot)
signals = np.zeros((len(pivot), 3))
signals[:, 0] = signalA
signals[:, 1] = signalB
signals[:, 2] = signalC
fourier = createFourier(targets, periods)
params = fourier.characterize(signals, pivot, targets)
# intercepts
checkFloat('Signal A intercept', params['A']['intercept'], 0)
checkFloat('Signal B intercept', params['B']['intercept'], 0)
checkFloat('Signal C intercept', params['C']['intercept'], interceptC)
# amplitudes
checkFloat('Signal A period 0 amplitude', params['A']['coeffs'][periods[0]]['amplitude'], amps[0])
checkFloat('Signal A period 1 amplitude', params['A']['coeffs'][periods[1]]['amplitude'], amps[1])
checkFloat('Signal A period 2 amplitude', params['A']['coeffs'][periods[2]]['amplitude'], amps[2])
checkFloat('Signal B period 0 amplitude', params['B']['coeffs'][periods[0]]['amplitude'], amps[0])
checkFloat('Signal B period 1 amplitude', params['B']['coeffs'][periods[1]]['amplitude'], amps[1])
checkFloat('Signal B period 2 amplitude', params['B']['coeffs'][periods[2]]['amplitude'], amps[2])
checkFloat('Signal C period 0 amplitude', params['C']['coeffs'][periods[0]]['amplitude'], amps[0])
checkFloat('Signal C period 1 amplitude', params['C']['coeffs'][periods[1]]['amplitude'], amps[1])
checkFloat('Signal C period 2 amplitude', params['C']['coeffs'][periods[2]]['amplitude'], amps[2])
# phases
# check absolute value of phase pi since -pi and pi are often converged on separately
checkFloat('Signal A period 0 phase', params['A']['coeffs'][periods[0]]['phase'] , phasesA[0])
checkFloat('Signal A period 1 phase', abs(params['A']['coeffs'][periods[1]]['phase']), phasesA[1])
checkFloat('Signal A period 2 phase', params['A']['coeffs'][periods[2]]['phase'] , phasesA[2])
checkFloat('Signal B period 0 phase', abs(params['B']['coeffs'][periods[0]]['phase']), phasesB[0])
checkFloat('Signal B period 1 phase', params['B']['coeffs'][periods[1]]['phase'] , phasesB[1])
checkFloat('Signal B period 2 phase', params['B']['coeffs'][periods[2]]['phase'] , phasesB[2])
checkFloat('Signal C period 0 phase', abs(params['C']['coeffs'][periods[0]]['phase']), phasesC[0])
checkFloat('Signal C period 1 phase', params['C']['coeffs'][periods[1]]['phase'] , phasesC[1])
checkFloat('Signal C period 2 phase', params['C']['coeffs'][periods[2]]['phase'] , phasesC[2])
# recreate signals
res = fourier.generate(params, pivot, None)
for tg, target in enumerate(targets):
checkArray(f'Signal {target} replication', res[:, tg], signals[:, tg], float)
##### now redo with non-simultaneous fitting
params = fourier.characterize(signals, pivot, targets, simultFit=False)
# intercepts
checkFloat('Signal A intercept', params['A']['intercept'], 0)
checkFloat('Signal B intercept', params['B']['intercept'], 0)
checkFloat('Signal C intercept', params['C']['intercept'], interceptC)
# amplitudes
checkFloat('Signal A period 0 amplitude', params['A']['coeffs'][periods[0]]['amplitude'], amps[0])
checkFloat('Signal A period 1 amplitude', params['A']['coeffs'][periods[1]]['amplitude'], amps[1])
checkFloat('Signal A period 2 amplitude', params['A']['coeffs'][periods[2]]['amplitude'], amps[2])
checkFloat('Signal B period 0 amplitude', params['B']['coeffs'][periods[0]]['amplitude'], amps[0])
checkFloat('Signal B period 1 amplitude', params['B']['coeffs'][periods[1]]['amplitude'], amps[1])
checkFloat('Signal B period 2 amplitude', params['B']['coeffs'][periods[2]]['amplitude'], amps[2])
checkFloat('Signal C period 0 amplitude', params['C']['coeffs'][periods[0]]['amplitude'], amps[0])
checkFloat('Signal C period 1 amplitude', params['C']['coeffs'][periods[1]]['amplitude'], amps[1])
checkFloat('Signal C period 2 amplitude', params['C']['coeffs'][periods[2]]['amplitude'], amps[2])
# phases
# check absolute value of phase pi since -pi and pi are often converged on separately
checkFloat('Signal A period 0 phase', params['A']['coeffs'][periods[0]]['phase'] , phasesA[0])
checkFloat('Signal A period 1 phase', abs(params['A']['coeffs'][periods[1]]['phase']), phasesA[1])
checkFloat('Signal A period 2 phase', params['A']['coeffs'][periods[2]]['phase'] , phasesA[2])
checkFloat('Signal B period 0 phase', abs(params['B']['coeffs'][periods[0]]['phase']), phasesB[0])
checkFloat('Signal B period 1 phase', params['B']['coeffs'][periods[1]]['phase'] , phasesB[1])
checkFloat('Signal B period 2 phase', params['B']['coeffs'][periods[2]]['phase'] , phasesB[2])
checkFloat('Signal C period 0 phase', abs(params['C']['coeffs'][periods[0]]['phase']), phasesC[0])
checkFloat('Signal C period 1 phase', params['C']['coeffs'][periods[1]]['phase'] , phasesC[1])
checkFloat('Signal C period 2 phase', params['C']['coeffs'][periods[2]]['phase'] , phasesC[2])
# recreate signals
res = fourier.generate(params, pivot, None)
for tg, target in enumerate(targets):
checkArray(f'Signal {target} replication', res[:, tg], signals[:, tg], float)
# check residual
# -> generate random noise to add to signal, then check it is returned in residual
r = np.random.rand(pivot.size, len(targets))
new = r + signals
res = fourier.getResidual(new, params, pivot, None)
for tg, target in enumerate(targets):
checkArray(f'Signal {target} residual', res[:, tg], r[:, tg], float)
print(results)
sys.exit(results["fail"])
"""
<TestInfo>
<name>framework.unit_tests.TSA.Fourier</name>
<author>talbpaul</author>
<created>2021-01-05</created>
<classesTested>TSA.Fourier</classesTested>
<description>
This test is a Unit Test for the Fourier TimeSeriesAnalyzer classes.
</description>
</TestInfo>
"""
|
StarcoderdataPython
|
8159021
|
<filename>test/scrapy.py<gh_stars>0
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
import queue  # Python 3: the Queue module was renamed to queue
initial_page = "http://www.zhugelicai.com"
url_queue = queue.Queue()
seen = set()
seen.add(initial_page)
url_queue.put(initial_page)
while True:  # loop until the queue is exhausted
    if url_queue.qsize() > 0:
        current_url = url_queue.get()  # take the next url off the queue
        store(current_url)  # persist the page behind this url (helper assumed to exist elsewhere)
        for next_url in extract_urls(current_url):  # extract the urls this page links to (helper assumed)
            if next_url not in seen:
                seen.add(next_url)
                url_queue.put(next_url)
    else:
        break
|
StarcoderdataPython
|
11295512
|
<reponame>kharrigian/pitchers-and-pianists<filename>scripts/tap_processing_stage_1.py
## In Stage 1 of processing, we manually check time series and throw out bad data (sensor malfunction, forgetting the task)
###############################
### Imports
###############################
# Standard I/O and Data Handling
import pandas as pd
import numpy as np
import os, glob, sys
import datetime
import copy
import pickle
# Signal Processing
import scipy.io as sio
# Data Loading
from scripts.libraries.helpers import load_tapping_data
from scripts.libraries.helpers import load_pickle, dump_pickle
# Plotting
import matplotlib.pyplot as plt
###############################
### Globals
###############################
# Main Data Directory
data_dir = "./data/"
# Tapping Data Directory
tapping_data_dir = data_dir + "tapping/"
# Tapping Filenames
tapping_filenames = glob.glob(tapping_data_dir + "*/*")
tapping_filenames = [tapping_filenames[i] for i in np.argsort([int(t.split("/")[-1].replace(".mat","")) for t in tapping_filenames])]
tapping_subjects = [int(t.split("/")[-1].split(".mat")[0]) for t in tapping_filenames]
# Survey Data Filename
survey_data_filename = data_dir + "survey.csv"
# Store Inspected File
eye_checks = {}
eye_check_store = data_dir + "manual_inspection.pickle"
if os.path.exists(eye_check_store):
eye_checks = load_pickle(eye_check_store)
###############################
### Survey Data
###############################
# Load Survey Data
survey_data = pd.read_csv(survey_data_filename)
# Missing Survey Subjects
missing_survey_data = [t for t in tapping_subjects if int(t) not in survey_data.Subject.values]
# Add Flag re: Tapping Participation
survey_data["valid_tapping_participant"] = survey_data.Subject.map(lambda i: i in tapping_subjects)
# Describe Arbitrary Dataset
def describe_subject_pool(survey_df):
n_sub = len(survey_df.Subject.unique())
female_percent = survey_df.Gender.value_counts(normalize=True)["F"] * 100
mean_age = survey_df.Age.mean()
std_age = survey_df.Age.std()
return """%s subjects (%.2f%% female, %.1f+-%.1f years old) """ % (n_sub, female_percent, mean_age, std_age)
print("Entire Study: %s" % describe_subject_pool(survey_data))
print("Tapping Study: %s" % describe_subject_pool(survey_data.loc[survey_data.valid_tapping_participant]))
###############################
### Bad Data Filtering (manual)
###############################
# Stage 1 (Bad Trial Plots)
stage_1_dir = "./plots/stage_1/"
if not os.path.exists(stage_1_dir):
os.mkdir(stage_1_dir)
# Check Each Subject
for file in tapping_filenames:
# Load in the Data
subject_data = load_tapping_data(file)
# Split out Data and Format
subject = file.split("/")[-1].replace(".mat","")
force_signal = subject_data["trial_force"]
trial_data = pd.DataFrame(force_signal.T)
trial_data.index = trial_data.index / 2000
# If subject didn't fill out survey, ignore
if int(subject) in missing_survey_data:
eye_checks[file] = "missing_survey_data"
# Check subjects that haven't been inspected or were discarded
if file not in eye_checks or (file in eye_checks and eye_checks[file] != "pass"):
# Create Plot
fig, ax = plt.subplots(figsize = (14,8), sharex = True)
trial_data.plot(subplots = True, layout = (2,3), color = "blue", linestyle = "-", alpha = .8,
linewidth = 1, ax = ax, legend = False)
fig.tight_layout()
fig.subplots_adjust(top=.94)
fig.suptitle("Subject: %s" % subject, y = .98)
# If already inspected, continue
if file not in eye_checks:
plt.show(block=False)
# Manually decide whether to keep or discard
keep_or_discard = input("Discard? ")
if len(keep_or_discard) == 0:
                keep_or_discard = "pass"
# Otherwise, load decision
else:
keep_or_discard = eye_checks[file]
# Store Decision
eye_checks[file] = keep_or_discard
# Save plot if discarded
if keep_or_discard != "pass":
plt.savefig(stage_1_dir + "%s_%s.png" % (subject, keep_or_discard))
# Close Plot
plt.close("all")
# Save Inspection
dump_pickle(eye_checks, eye_check_store)
###############################
### Analyze Thrown Out Data
###############################
# Create DF
eye_check_df = pd.Series(eye_checks).reset_index().rename(columns = {"index":"file",0:"decision"})
# Absolute Thrown Out (18 out of 338)
n_good_subjects = eye_check_df.decision.value_counts()["pass"]
n_bad_subjects = len(eye_check_df) - n_good_subjects
print("%s/%s subjects thrown out immediately" % (n_bad_subjects, len(eye_check_df)))
# Merge Demographics
eye_check_df["subject"] = eye_check_df["file"].map(lambda i: i.split("/")[-1].replace(".mat","")).astype(int)
eye_check_df = pd.merge(eye_check_df, survey_data[["Subject","Age","Gender"]], left_on = ["subject"], right_on = ["Subject"], how = "left")
# Isolate Thrown Out Subjects
thrown_out_df = eye_check_df.loc[eye_check_df.decision != "pass"]
# Thrown-out Age Distribution (non-sensor related <= 15 years old)
print("Thrown Out Age Distributions")
print(thrown_out_df.groupby(["decision"]).Age.value_counts())
# Valid Subject Pool
print(describe_subject_pool(eye_check_df.loc[eye_check_df.decision == "pass"]))
###############################
### Notes
###############################
"""
We begin data analysis with 336 subject data files. However, we immediately throw out
1 subject who did not complete a survey at all (subject 105).
Then we throw out an additional 17 subjects for the following reasons:
* forget (3): the subject forgot to continue tapping after the metronome ended in more than 2 trials
* style (2): the subject tapped too lightly or pressed in an abnormal fashion on the sensor
* sensor (12): the subject's data was corrupted by a sensor malfunction and we do not expect to recover taps correctly
This leaves us with 318 subjects to process (identify taps, run analysis, etc.). Their summary is as follows:
318 subjects (54.09% female, 26.0+-14.4 years old)
"""
|
StarcoderdataPython
|
5084175
|
<filename>src/sage/combinat/subsets_pairwise.py
r"""
Subsets whose elements satisfy a predicate pairwise
"""
#*****************************************************************************
# Copyright (C) 2011 <NAME> <nthiery at users.sf.net>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#******************************************************************************
from sage.categories.finite_enumerated_sets import FiniteEnumeratedSets
from sage.sets.set import Set_object_enumerated
from sage.combinat.backtrack import SearchForest
from sage.combinat.subset import Subsets
class PairwiseCompatibleSubsets(SearchForest):
r"""
The set of all subsets of ``ambient`` whose elements satisfy
``predicate`` pairwise
INPUT:
- ``ambient`` -- a set (or iterable)
- ``predicate`` -- a binary predicate
Assumptions: ``predicate`` is symmetric (``predicate(x,y) ==
predicate(y,x)``) and reflexive (``predicate(x,x) == True``).
.. note:: in fact, ``predicate(x,x)`` is never called.
.. warning:: The current name is suboptimal and is subject to
change. Suggestions for a good name, and a good user entry
point are welcome. Maybe ``Subsets(..., independent = predicate)``.
EXAMPLES:
We construct the set of all subsets of `\{4,5,6,8,9\}` whose
elements are pairwise relatively prime::
sage: from sage.combinat.subsets_pairwise import PairwiseCompatibleSubsets
sage: def predicate(x,y): return gcd(x,y) == 1
sage: P = PairwiseCompatibleSubsets( [4,5,6,8,9], predicate); P
An enumerated set with a forest structure
sage: P.list()
[{}, {4}, {4, 5}, {9, 4, 5}, {9, 4}, {5}, {5, 6}, {8, 5}, {8, 9, 5}, {9, 5}, {6}, {8}, {8, 9}, {9}]
sage: P.cardinality()
14
sage: P.category()
Category of finite enumerated sets
Here we consider only those subsets which are maximal for
inclusion (not yet implemented)::
sage: P = PairwiseCompatibleSubsets( [4,5,6,8,9], predicate, maximal = True); P
An enumerated set with a forest structure
sage: P.list() # todo: not implemented
[{9, 4, 5}, {5, 6}, {8, 9, 5}]
sage: P.cardinality() # todo: not implemented
14
sage: P.category()
Category of finite enumerated sets
.. rubric:: Algorithm
In the following, we order the elements of the ambient set by
order of apparition. The elements of ``self`` are generated by
organizing them in a search tree. Each node of this tree is of the
form ``(subset, rest)``, where:
- ``subset`` represents an element of ``self``, represented
by an increasing tuple
- ``rest`` is the set of all `y`'s such that `y` appears
after `x` in the ambient set and ``predicate(x,y)``
holds, represented by a decreasing tuple
The root of this tree is ``( (), ambient )``. All the other elements
are generated by recursive depth first search, which gives
lexicographic order.
"""
#@staticmethod
#def __classcall__(cls, ambient, predicate):
# ambient = Set(ambient)
# return super(PairwiseCompatibleSubsets, cls).__classcall__(cls, ambient, predicate)
__len__ = None
def __init__(self, ambient, predicate, maximal = False, element_class = Set_object_enumerated):
"""
TESTS::
sage: from sage.combinat.subsets_pairwise import PairwiseCompatibleSubsets
sage: def predicate(x,y): return gcd(x,y) == 1
sage: P = PairwiseCompatibleSubsets( [4,5,6,8,9], predicate); P
An enumerated set with a forest structure
sage: import __main__; __main__.predicate = predicate
sage: TestSuite(P).run()
"""
self._ambient = set(ambient)
self._roots = ( ((), tuple(reversed(ambient))), )
self._predicate = predicate
self._maximal = maximal
# TODO: use self.element_class for consistency
# At this point (2011/03) TestSuite fails if we do so
self._element_class = element_class
SearchForest.__init__(self, algorithm = 'depth', category = FiniteEnumeratedSets())
def __eq__(self, other):
"""
Equality test; not really useful, but this pleases pickling ...
TESTS::
sage: from sage.combinat.subsets_pairwise import PairwiseCompatibleSubsets
sage: def predicate(x,y): return gcd(x,y) == 1
sage: P = PairwiseCompatibleSubsets( [4,5,6,8,9], predicate); P
An enumerated set with a forest structure
sage: P == P
True
"""
return self.__class__ is other.__class__ and self._ambient == other._ambient and self._predicate == other._predicate
def __contains__(self, subset):
"""
Membership testing
Returns whether subset is a subset of ``self._ambient``, and
``predicate(x,y)`` holds for every ``x,y`` in ``self``.
EXAMPLES::
sage: from sage.combinat.subsets_pairwise import PairwiseCompatibleSubsets
sage: def predicate(x,y): return gcd(x,y) == 1
sage: P = PairwiseCompatibleSubsets( [4,5,6,8,9], predicate); P
An enumerated set with a forest structure
sage: Set([5,8,9]) in P
True
sage: Set([5,8,11]) in P
False
sage: Set([4,6]) in P
False
"""
return isinstance(subset, self._element_class ) and \
set(subset).issubset(self._ambient) and \
all( self._predicate(x,y) for x,y in Subsets(subset,2) )
def post_process(self, subset_rest):
"""
TESTS::
sage: from sage.combinat.subsets_pairwise import PairwiseCompatibleSubsets
sage: def predicate(x,y): return gcd(x,y) == 1
sage: P = PairwiseCompatibleSubsets( [4,5,6,8,9], predicate); P
An enumerated set with a forest structure
sage: P.post_process( ((4,5), (9)) )
{4, 5}
sage: P.post_process( ((4,5), ()) )
{4, 5}
"""
return self._element_class(subset_rest[0])
def children(self, subset_rest):
"""
Returns the children of a node in the tree.
TESTS::
sage: from sage.combinat.subsets_pairwise import PairwiseCompatibleSubsets
sage: def predicate(x,y): return gcd(x,y) == 1
sage: P = PairwiseCompatibleSubsets( [3,5,7,11,14], predicate); P
An enumerated set with a forest structure
sage: list(P.children( ((3,5), [14,11,7]) ))
[((3, 5, 7), (11,)), ((3, 5, 11), (14,)), ((3, 5, 14), ())]
"""
(subset, rest) = subset_rest
predicate = self._predicate
result = []
rest = list(rest)
while rest:
x = rest.pop()
result.append((subset+(x,), tuple( y for y in rest if predicate(x,y) )))
return result
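# Worked illustration of the search tree described in the class docstring (added
# as a sketch; running it would need a Sage session with gcd available): with
# ambient [4, 5, 6, 8, 9] and predicate(x, y) = (gcd(x, y) == 1), the root node
# ((), (9, 8, 6, 5, 4)) expands to
#   [((4,), (9, 5)), ((5,), (9, 8, 6)), ((6,), ()), ((8,), (9,)), ((9,), ())],
# i.e. each child records the chosen element plus the later elements still
# compatible with it, which is exactly what ``children`` above computes.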
|
StarcoderdataPython
|
4977514
|
#The MIT License
#
#Copyright (c) 2017 DYNI machine learning & bioacoustics team - Univ. Toulon
#
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
#the Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
#FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
#COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
#IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
#CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from enum import Enum
from scipy.signal import hann
from dynibatch.utils.exceptions import DynibatchError
class WindowType(Enum):
rect = 0
hanning = 1
def window(win_type, size):
"""Return a precalculated window with a defined size and type
Args:
win_type (WindowType): type of the window wanted
size (int): size of the window
Returns:
a precalculated (win_type) window with (size) as size
"""
if win_type == WindowType.hanning:
# use asymetric window (https://en.wikipedia.org/wiki/Window_function#Symmetry)
return hann(size, sym=False)
else:
raise DynibatchError("Window type {} is not defined".format(win_type))
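# A short usage sketch (added; not part of the original module): build a
# 1024-point periodic (asymmetric) Hanning window for STFT-style framing.
if __name__ == "__main__":
    win = window(WindowType.hanning, 1024)
    print(win.shape)  # (1024,)
    print(win[0])     # 0.0 -- the periodic window starts at zero but does not end at zero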
|
StarcoderdataPython
|
6439632
|
from __future__ import unicode_literals
import django
from django.test import TestCase
from job.configuration.interface.scale_file import ScaleFileDescription
class TestScaleFileDescriptionMediaTypeAllowed(TestCase):
def setUp(self):
django.setup()
def test_accept_all(self):
"""Tests calling ScaleFileDescription.is_media_type_allowed() when accepting all media types."""
self.assertTrue(ScaleFileDescription().is_media_type_allowed('application/json'))
self.assertTrue(ScaleFileDescription().is_media_type_allowed('application/x-some-crazy-thing'))
def test_accept_specific(self):
"""Tests calling ScaleFileDescription.is_media_type_allowed() when accepting specific media types."""
file_desc = ScaleFileDescription()
file_desc.add_allowed_media_type(None) # Don't blow up
file_desc.add_allowed_media_type('application/json')
file_desc.add_allowed_media_type('text/plain')
self.assertTrue(file_desc.is_media_type_allowed('application/json'))
self.assertTrue(file_desc.is_media_type_allowed('text/plain'))
self.assertFalse(file_desc.is_media_type_allowed('application/x-some-crazy-thing'))
|
StarcoderdataPython
|
111575
|
<gh_stars>0
# Generated by Django 3.0.7 on 2020-07-18 10:11
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('to_do', '0009_auto_20200718_1523'),
]
operations = [
migrations.CreateModel(
name='Last_Date',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('last_date', models.TextField(default='yo', null=True)),
],
),
migrations.RemoveField(
model_name='todo',
name='last_date',
),
migrations.AddField(
model_name='todo',
name='pub_date',
field=models.DateTimeField(default=datetime.datetime(2020, 7, 18, 10, 11, 57, 568311, tzinfo=utc), null=True),
),
]
|
StarcoderdataPython
|
1734242
|
<gh_stars>1-10
import subprocess
import os
import argparse
import time
DATA_LOC='/home/boubin/Images/'
CUBE_LOC='/home/boubin/SoftwarePilot/DistributedRL/Data/'
def consoleLog(string):
print("#################################################")
print("##############DRL Controller:")
print(string)
print("#################################################")
#Start HDFS
def startHDFS():
consoleLog("DRL Controller Loading HDFS Containers")
    pwd = os.getcwd()
os.chdir('../docker-hadoop')
subprocess.call(['docker-compose','up', '-d'])
os.chdir(pwd)
consoleLog("HDFS Loaded")
#Start Servers
def startServers(numServs):
serverList = []
pwd = os.getcwd()
os.chdir('../Gateway')
for i in range(0, numServs):
#subprocess.call(['docker', 'run', '--net=host', '-e SERVERNUM='+str(i),
# '--name','server'+str(i), 'spcn', '/bin/bash'])
subprocess.call(['bash','runGateway_Sim.sh', str(i),'server'+str(i)])
consoleLog("Server" + str(i)+" Started")
serverList.append("server"+str(i))
os.chdir(pwd)
return serverList
def startAggregators(numAgs):
aggList = []
pwd = os.getcwd()
os.chdir('../Aggregator')
for i in range(numAgs):
models = '{:04b}'.format(i)
subprocess.call(['bash', 'runAggregator_Sim.sh', models, 'Aggregator'+str(models)])
consoleLog("Aggregator "+str(models)+" Started")
os.chdir(pwd)
return aggList
#Start workers and servers
def startWorkers(numServs, numWorkers):
workerList = []
pwd = os.getcwd()
os.chdir('../Worker')
for i in range(0, numServs):
for j in range(0,numWorkers):
#subprocess.call(['docker', 'run', '--net=host',
# '-e SERVERNUM='+str(i),'-e WORKERNUM='+str(j),
# '--name','worker'+str(i)+'_'+str(j),
# '-v',CUBE_LOC+"Worker"+str(i)+'_'+str(j) + ':/home/mydata:Z',
# '-v',DATA_LOC + ':/home/imageData:Z',
# 'spen', '/bin/bash', '-c \"bash run.sh\"'])
subprocess.call(['bash','runWorker_Sim.sh',str(i), str(j),"worker"+str(i)+'_'+str(j)])
consoleLog("Worker" + str(i)+'_'+str(j)+" Started")
workerList.append("worker"+str(i)+'_'+str(j))
os.chdir(pwd)
return workerList
def startGlobal():
pwd = os.getcwd()
os.chdir("../Global")
subprocess.call(['bash','runGlobal_Sim.sh',"global"])
consoleLog("Global Started")
#start simulation
#Stop Servers
def stopServers(serverList):
for server in serverList:
subprocess.call(["docker", "rm", "-f", server])
consoleLog("Server " + server + " Stopped")
#Stop Workers
def stopWorkers(workerList):
for worker in workerList:
subprocess.call(["docker", "rm", "-f", worker])
consoleLog("Worker " + worker + " Stopped")
def stopGlobal():
subprocess.call(["docker","rm","-f","global"])
consoleLog("Global Stopped")
#kill HDFS
def killHDFS():
consoleLog("Exiting Simulation")
consoleLog("Killing HDFS")
os.chdir('../docker-hadoop')
subprocess.call(['docker-compose', 'down'])
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Distributed RL Controller')
#parser.add_argument('servers', metavar='S', type=int, nargs='+', help='Number of Server Nodes to Start')
#parser.add_argument('workers', metavar='W', type=int, nargs='+', help='Number of Worker Nodes to Start per Server')
#args = parser.parse_args()
consoleLog("DRL Controller Starting")
#Start HDFS Docker cluster
consoleLog("Starting HDFS Cluster")
#Currently Assuming HDFS instance runs independently of DRL Controller
#startHDFS()
consoleLog("Starting Global")
startGlobal()
consoleLog("Starting Aggregators")
serverList = startAggregators(16)
consoleLog("Starting Workers")
workerList = startWorkers(2, 2)
#Run Simulation
#time.sleep(3600)
#Stop Servers
#consoleLog("Stopping Global")
#stopGlobal()
#consoleLog("Stopping Servers")
#stopServers(serverList);
#consoleLog("Stopping Workers")
#stopWorkers(workerList)
#consoleLog("Killing HDFS")
#killHDFS();
|
StarcoderdataPython
|
6688672
|
<reponame>ammsa23/dials<filename>tests/command_line/test_slice_sequence.py
from __future__ import annotations
import os
import procrunner
import pytest
from dxtbx.serialize import load
from dials.array_family import flex
def test_slice_sequence_and_compare_with_expected_results(dials_regression, tmpdir):
# use the i04_weak_data for this test
data_dir = os.path.join(dials_regression, "refinement_test_data", "i04_weak_data")
experiments_path = os.path.join(data_dir, "experiments.json")
pickle_path = os.path.join(data_dir, "indexed_strong.pickle")
for pth in (experiments_path, pickle_path):
assert os.path.exists(pth)
result = procrunner.run(
["dials.slice_sequence", experiments_path, pickle_path, "image_range=1 20"],
working_directory=tmpdir,
)
assert not result.returncode and not result.stderr
# load results
sliced_exp = load.experiment_list(
tmpdir.join("experiments_1_20.expt").strpath, check_format=False
)[0]
sliced_refs = flex.reflection_table.from_file(tmpdir / "indexed_strong_1_20.refl")
# simple test of results
assert sliced_exp.scan.get_image_range() == (1, 20)
assert len(sliced_refs) == 3670
def test_slice_sequence_with_first_images_missing(dials_regression, tmpdir):
"""Test slicing where scan image range does not start at 1, exercising
a case that exposed a bug"""
# use the i04_weak_data for this test
data_dir = os.path.join(dials_regression, "refinement_test_data", "i04_weak_data")
experiments_path = os.path.join(data_dir, "experiments.json")
# first slice
result = procrunner.run(
["dials.slice_sequence", experiments_path, "image_range=5,20"],
working_directory=tmpdir,
)
assert not result.returncode and not result.stderr
# second slice
result = procrunner.run(
["dials.slice_sequence", "experiments_5_20.expt", "image_range=10,20"],
working_directory=tmpdir,
)
assert not result.returncode and not result.stderr
sliced_exp = load.experiment_list(
tmpdir.join("experiments_5_20_10_20.expt").strpath, check_format=False
)[0]
assert sliced_exp.scan.get_image_range() == (10, 20)
assert sliced_exp.scan.get_array_range() == (9, 20)
assert sliced_exp.scan.get_oscillation()[0] == pytest.approx(83.35)
def test_slice_sequence_to_degree_blocks(dials_data, tmpdir):
"""Slice data into 10 degree blocks i.e. 17 datasets"""
expt = dials_data("l_cysteine_4_sweeps_scaled") / "scaled_30.expt"
refl = dials_data("l_cysteine_4_sweeps_scaled") / "scaled_30.refl"
procrunner.run(
[
"dials.slice_sequence",
"block_size=10",
"output.experiments=sliced.expt",
"output.reflections=sliced.refl",
expt,
refl,
],
working_directory=tmpdir,
)
sliced_expts = load.experiment_list(
tmpdir.join("sliced.expt").strpath, check_format=False
)
assert len(sliced_expts) == 17
sliced_refl = flex.reflection_table.from_file(tmpdir.join("sliced.refl").strpath)
assert len(set(sliced_refl.experiment_identifiers().values())) == 17
sliced_refl.assert_experiment_identifiers_are_consistent(sliced_expts)
def test_slice_sequence_with_scan_varying_crystal(dials_data, tmpdir):
"""test slicing keeps a scan-varying crystal"""
expt = dials_data("l_cysteine_4_sweeps_scaled") / "scaled_30.expt"
procrunner.run(
[
"dials.slice_sequence",
"image_range=10,20",
"output.experiments=sliced.expt",
expt,
],
working_directory=tmpdir,
)
orig = load.experiment_list(expt.strpath, check_format=False)[0]
sliced = load.experiment_list(
tmpdir.join("sliced.expt").strpath, check_format=False
)[0]
assert sliced.crystal.num_scan_points == 12
orig_UB = [
orig.crystal.get_A_at_scan_point(i) for i in range(orig.crystal.num_scan_points)
]
sliced_UB = [
sliced.crystal.get_A_at_scan_point(i)
for i in range(sliced.crystal.num_scan_points)
]
for a, b in zip(orig_UB[9:21], sliced_UB):
assert a == pytest.approx(b)
|
StarcoderdataPython
|
3393957
|
from webApp.utils.cryptographie import *
class Credentials:
vault_masterpassword = None
vault_username = None
user_id = None
len_vault_username = None
len_vault_masterpassword = None
#
len_user_id_hashed = None
user_id_hash = None
    #user_id={length=fixed(64)}
def __init__(self, pvault_username, pvault_masterpassword, puser_id):
self.user_id = puser_id
self.vault_username = hash(pvault_username, 99999, puser_id.encode())
self.vault_masterpassword = hash(pvault_masterpassword, <PASSWORD>, puser_id.encode())
self.len_vault_username = len(pvault_username)
self.len_vault_masterpassword = len(pvault_masterpassword)
self.len_user_id = len(self.user_id)
self.user_id_hash = hash(self.user_id, 14082, puser_id.encode())
def get_creds(self, obj_1, obj_2, const, merged_data):
output = None
if obj_1 > obj_2:
calc_value = const - (obj_1 - obj_2)
if calc_value < 0:
self.mani_value = obj_1 - calc_value
else:
self.mani_value = obj_1 + calc_value
output = merged_data[obj_2:self.mani_value]
elif obj_1 < obj_2:
calc_value = const - (obj_2 - obj_1)
if calc_value < 0:
self.mani_value = obj_2 - calc_value
else:
self.mani_value = obj_2 + calc_value
output = merged_data[obj_1:self.mani_value]
else:
calc_value = const - (obj_2)
if calc_value < 0:
self.mani_value = obj_2 - calc_value
else:
self.mani_value = obj_2 + calc_value
output = merged_data[obj_1:self.mani_value]
return output
#IV = 16
#SALT = 64
def get_SecretKey(self): #1 start
#obj_1=master_password length
#obj_2=username length
merged_data_salt = hash((self.vault_username+self.vault_masterpassword+self.user_id_hash)*(self.len_vault_masterpassword+self.len_vault_username), 99999, self.user_id.encode())
salt = self.get_creds(self.len_vault_masterpassword, self.len_vault_username, 64, merged_data_salt).encode()
merged_data_iv = hash((self.vault_username+self.vault_masterpassword)*(self.len_vault_masterpassword+self.len_vault_username), 99999, salt)
iv = self.get_creds(self.len_vault_masterpassword, self.len_vault_username, 16, merged_data_iv).encode()
return iv+salt
def gen_UserKey(self, salt): #2 start
hash_list = hash(self.vault_username, 10000, salt), hash(self.vault_masterpassword, 10000, salt), hash(self.user_id, 10000, salt)
user_pass_hash = hash(hash_list[1] + hash_list[2], 50000, salt)
pass_id_hash = hash(hash_list[2] + hash_list[0], 50000, salt)
userKey = hash(user_pass_hash + pass_id_hash, len(self.vault_username) * len(self.vault_masterpassword), salt)
return userKey
def gen_key(self, psecret_key, salt):#3 start
secret_key = hash(psecret_key.decode("ascii"), 99999, salt)
key = hash(secret_key + self.gen_UserKey(salt), 99999, salt)
return key
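# Key-derivation flow, summarized here for clarity (comment added; based on the
# methods above):
#   1. get_SecretKey() slices a 16-character IV and a 64-character salt out of
#      hashes built from the hashed username, master password and user id,
#   2. gen_UserKey(salt) mixes the three hashed credentials pairwise into a user key,
#   3. gen_key(secret_key, salt) hashes the secret key together with that user key
#      to produce the final key.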
|
StarcoderdataPython
|
131689
|
<filename>day02/python/part1.py
#!/usr/bin/env python3
import helper
class ShouldNeverGetHere(Exception):
pass
def main():
# lines = helper.read_lines("example.txt")
lines = helper.read_lines("input.txt")
horizontal, depth = (0, 0)
for line in lines:
parts = line.split()
instruction = parts[0]
value = int(parts[1])
if instruction == "forward":
horizontal += value
elif instruction == "down":
depth += value
elif instruction == "up":
depth -= value
else:
raise ShouldNeverGetHere()
#
#
print(f"{horizontal=}")
print(f"{depth=}")
print("---")
print(horizontal * depth)
##############################################################################
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
9628240
|
<reponame>lukeenterprise/json-subschema
'''
Created on May 30, 2019
@author: <NAME>
'''
import unittest
from jsonsubschema.checker import isSubschema
class TestArraySubtype(unittest.TestCase):
def test_identity(self):
s1 = {"$schema": "http://json-schema.org/draft-04/schema",
"type": "array",
"minItems": 5, "maxItems:": 10}
s2 = s1
self.assertTrue(isSubschema(s1, s2))
def test_min_max(self):
s1 = {"$schema": "http://json-schema.org/draft-04/schema",
"type": "array",
"minItems": 5, "maxItems:": 10}
s2 = {"$schema": "http://json-schema.org/draft-04/schema",
"type": "array",
"minItems": 1, "maxItems:": 20}
with self.subTest():
self.assertTrue(isSubschema(s1, s2))
with self.subTest():
self.assertFalse(isSubschema(s2, s1))
def test_unique(self):
s1 = {"$schema": "http://json-schema.org/draft-04/schema",
"type": "array", "uniqueItems": True}
s2 = {"$schema": "http://json-schema.org/draft-04/schema",
"type": "array", "uniqueItems": False}
with self.subTest():
self.assertTrue(isSubschema(s1, s2))
with self.subTest():
self.assertFalse(isSubschema(s2, s1))
def test_empty_items1(self):
s1 = {"$schema": "http://json-schema.org/draft-04/schema",
"type": "array"}
s2 = {"$schema": "http://json-schema.org/draft-04/schema",
"type": "array", "items": {}}
with self.subTest():
self.assertTrue(isSubschema(s1, s2))
with self.subTest():
self.assertTrue(isSubschema(s2, s1))
def test_empty_items2(self):
s1 = {"$schema": "http://json-schema.org/draft-04/schema",
"type": "array", "additionalItems": False}
s2 = {"$schema": "http://json-schema.org/draft-04/schema",
"type": "array", "items": {}}
with self.subTest():
self.assertTrue(isSubschema(s1, s2))
with self.subTest():
self.assertTrue(isSubschema(s2, s1))
def test_empty_items3(self):
s1 = {"$schema": "http://json-schema.org/draft-04/schema",
"type": "array", "items": [{}, {}], "additionalItems": False}
s2 = {"$schema": "http://json-schema.org/draft-04/schema",
"type": "array", "items": {}}
with self.subTest():
self.assertTrue(isSubschema(s1, s2))
with self.subTest():
self.assertFalse(isSubschema(s2, s1))
def test_empty_items4(self):
s1 = {"$schema": "http://json-schema.org/draft-04/schema",
"type": "array", "items": [{}, {}], "additionalItems": True}
s2 = {"$schema": "http://json-schema.org/draft-04/schema",
"type": "array", "items": {}}
with self.subTest():
self.assertTrue(isSubschema(s1, s2))
with self.subTest():
self.assertTrue(isSubschema(s2, s1))
def test_empty_items5(self):
s1 = {"$schema": "http://json-schema.org/draft-04/schema",
"type": "array", "items": [{}, {}], "additionalItems": False}
s2 = {"$schema": "http://json-schema.org/draft-04/schema",
"type": "array", "items": [{}], "additionalItems": False}
with self.subTest():
self.assertFalse(isSubschema(s1, s2))
with self.subTest():
self.assertTrue(isSubschema(s2, s1))
def test_dictItems_listItems1(self):
s1 = {"$schema": "http://json-schema.org/draft-04/schema",
"type": "array", "items": {"type": "string"}}
s2 = {"$schema": "http://json-schema.org/draft-04/schema",
"type": "array", "items": [{"type": "string"}]}
with self.subTest():
self.assertTrue(isSubschema(s1, s2))
with self.subTest():
self.assertFalse(isSubschema(s2, s1))
def test_dictItems_listItems2(self):
s1 = {"$schema": "http://json-schema.org/draft-04/schema",
"type": "array", "items": {"type": "string"}}
s2 = {"$schema": "http://json-schema.org/draft-04/schema",
"type": "array", "items": [{"type": "string"}, {"type": "string"}]}
with self.subTest():
self.assertTrue(isSubschema(s1, s2))
with self.subTest():
self.assertFalse(isSubschema(s2, s1))
def test_dictItems_listItems3(self):
s1 = {"$schema": "http://json-schema.org/draft-04/schema",
"type": "array", "items": [{"type": "string"}]}
s2 = {"$schema": "http://json-schema.org/draft-04/schema",
"type": "array", "items": [{"type": "string"}, {"type": "number"}]}
with self.subTest():
self.assertFalse(isSubschema(s1, s2))
with self.subTest():
self.assertTrue(isSubschema(s2, s1))
def test_dictItems_listItems4(self):
s1 = {"$schema": "http://json-schema.org/draft-04/schema",
"type": "array", "items": [{"type": "string"}], "additionalItems": False}
s2 = {"$schema": "http://json-schema.org/draft-04/schema",
"type": "array", "items": [{"type": "string"}, {"type": "number"}]}
with self.subTest():
self.assertTrue(isSubschema(s1, s2))
with self.subTest():
self.assertFalse(isSubschema(s2, s1))
def test_dictItems_listItems5(self):
s1 = {"$schema": "http://json-schema.org/draft-04/schema",
"type": "array", "items": [{"type": "string"}], "additionalItems": True}
s2 = {"$schema": "http://json-schema.org/draft-04/schema",
"type": "array", "items": [{"type": "string"}, {"type": "number"}]}
with self.subTest():
self.assertFalse(isSubschema(s1, s2))
with self.subTest():
self.assertTrue(isSubschema(s2, s1))
def test_dictItems_listItems6(self):
s1 = {"$schema": "http://json-schema.org/draft-04/schema",
"type": "array", "items": [{"type": "string"}], "additionalItems": {}}
s2 = {"$schema": "http://json-schema.org/draft-04/schema",
"type": "array", "items": [{"type": "string"}, {"type": "number"}]}
with self.subTest():
self.assertFalse(isSubschema(s1, s2))
with self.subTest():
self.assertTrue(isSubschema(s2, s1))
|
StarcoderdataPython
|
8057953
|
# coding: utf-8
from __future__ import print_function
import json
from comodit_client.api.platform import Image
from comodit_client.api.settings import SimpleSetting
from comodit_client.control import completions
from comodit_client.control.doc import ActionDoc
from comodit_client.control.entity import EntityController
from comodit_client.control.exceptions import ArgumentException
from comodit_client.control.json_update import JsonUpdater
class InstancesController(EntityController):
def __init__(self):
super(InstancesController, self).__init__()
# actions
self._register(["start"], self._start, self._print_entity_completions)
self._register(["pause"], self._pause, self._print_entity_completions)
self._register(["resume"], self._resume, self._print_entity_completions)
self._register(["shutdown"], self._shutdown, self._print_entity_completions)
self._register(["poweroff"], self._poweroff, self._print_entity_completions)
self._register(["forget"], self._forget, self._print_entity_completions)
self._register(["properties"], self._properties, self._print_entity_completions)
self._register(["show_file"], self._show_file, self._print_entity_completions)
self._register(["get_status"], self._get_status, self._print_entity_completions)
self._register(["create_image"], self._create_image, self._print_entity_completions)
# Unregister unsupported actions
self._unregister(["update", "list", "add"])
self._doc = "Host instances handling."
self._update_action_doc_params("delete", "<org_name> <env_name> <host_name>")
self._update_action_doc_params("show", "<org_name> <env_name> <host_name>")
self._register_action_doc(self._start_doc())
self._register_action_doc(self._pause_doc())
self._register_action_doc(self._resume_doc())
self._register_action_doc(self._shutdown_doc())
self._register_action_doc(self._poweroff_doc())
self._register_action_doc(self._forget_doc())
self._register_action_doc(self._properties_doc())
self._register_action_doc(self._show_file_doc())
self._register_action_doc(self._get_status_doc())
self._register_action_doc(self._create_image_doc())
def get_collection(self, argv):
if len(argv) < 3:
raise ArgumentException("Wrong number of arguments");
return self._client.get_host(argv[0], argv[1], argv[2]).instance()
def _print_collection_completions(self, param_num, argv):
if param_num == 0:
completions.print_identifiers(self._client.organizations())
elif len(argv) > 0 and param_num == 1:
completions.print_identifiers(self._client.environments(argv[0]))
elif len(argv) > 1 and param_num == 2:
completions.print_identifiers(self._client.hosts(argv[0], argv[1]))
def _print_entity_completions(self, param_num, argv):
if param_num < 3:
self._print_collection_completions(param_num, argv)
def _get_name_argument(self, argv):
return ""
def _get_value_argument(self, argv):
return None
def _properties(self, argv):
instance = self._get_entity(argv)
options = self._config.options
if options.raw:
print(json.dumps(instance._get_field("properties"), indent = 4))
else:
for p in instance.properties:
p.show()
def _properties_doc(self):
return ActionDoc("properties", "<org_name> <env_name> <host_name>", """
Show properties of a given host instance.""")
def _delete(self, argv):
instance = self._get_entity(argv)
instance.delete()
def _delete_doc(self):
return ActionDoc("delete", "<org_name> <env_name> <host_name>", """
Delete a host instance.""")
def _start(self, argv):
instance = self._get_entity(argv)
instance.start()
def _start_doc(self):
return ActionDoc("start", "<org_name> <env_name> <host_name>", """
Start a host instance.""")
def _pause(self, argv):
instance = self._get_entity(argv)
instance.pause()
def _pause_doc(self):
return ActionDoc("pause", "<org_name> <env_name> <host_name>", """
Pause a host instance.""")
def _resume(self, argv):
instance = self._get_entity(argv)
instance.resume()
def _resume_doc(self):
return ActionDoc("resume", "<org_name> <env_name> <host_name>", """
Resume a host instance.""")
def _shutdown(self, argv):
instance = self._get_entity(argv)
instance.shutdown()
def _shutdown_doc(self):
return ActionDoc("shutdown", "<org_name> <env_name> <host_name>", """
Shutdown a host instance.""")
def _poweroff(self, argv):
instance = self._get_entity(argv)
instance.poweroff()
def _poweroff_doc(self):
return ActionDoc("poweroff", "<org_name> <env_name> <host_name>", """
Power-off a host instance.""")
def _forget(self, argv):
instance = self._get_entity(argv)
instance.forget()
def _forget_doc(self):
return ActionDoc("forget", "<org_name> <env_name> <host_name>", """
Forgets a host instance.""")
def _show_file(self, argv):
if len(argv) < 4:
raise ArgumentException("Wrong number of arguments");
instance = self._get_entity(argv)
print(instance.get_file_content(argv[3]).read(), end=' ')
def _show_file_doc(self):
return ActionDoc("show_file", "<org_name> <env_name> <host_name> <path>", """
Show a host's file content.""")
def _get_status(self, argv):
if len(argv) < 5:
raise ArgumentException("Wrong number of arguments");
instance = self._get_entity(argv)
print(instance.get_status(argv[3], argv[4]), end=' ')
def _get_status_doc(self):
return ActionDoc("get_status", "<org_name> <env_name> <host_name> <collection> <sensor>", """
        Get the status of a given host instance for the given collection and sensor.""")
def _create_image(self, argv):
image = Image()
image.create_distribution = False
host = self._client.get_host(argv[0], argv[1], argv[2])
platform = self._client.get_platform(argv[0], host.platform_name)
image.settings = [ self._build_setting(param) for param in platform.image_parameters() ]
updater = JsonUpdater(self._config.options, ignore_not_modified=True)
updated_json = updater.update(image)
image.set_json(updated_json)
instance = self._get_entity(argv)
instance.create_image(image)
def _build_setting(self, parameter):
setting = SimpleSetting(None)
setting.key = parameter.key
setting.value = parameter.value
return setting
def _create_image_doc(self):
return ActionDoc("create_image", "<org_name> <env_name> <host_name>", """
Creates an image from given host's instance.""")
|
StarcoderdataPython
|
5020018
|
<filename>rsbook_code/utilities/differences.py
from __future__ import print_function,division
from builtins import range
import numpy as np
def gradient_forward_difference(f,x,h):
"""Approximation of the gradient of f(x) using forward differences with step size h"""
g = np.zeros(len(x))
f0 = f(x)
for i in range(len(x)):
v = x[i]
x[i] += h
g[i] = (f(x)-f0)
x[i] = v
g *= 1.0/h
return g
def jacobian_forward_difference(f,x,h):
"""Approximation of the Jacobian of vector function f(x) using forward differences with step size h"""
f0 = np.asarray(f(x))
J = np.zeros((len(f0),len(x)))
for i in range(len(x)):
v = x[i]
x[i] += h
J[:,i] = (np.asarray(f(x))-f0)
x[i] = v
J *= 1.0/h
return J
def hessian_forward_difference(f,x,h):
"""Approximation of the hessian of f(x) using forward differences with
step size h.
"""
H = np.zeros((len(x),len(x)))
f0 = f(x)
fs = []
for i in range(len(x)):
v = x[i]
x[i] += h
fs.append(f(x))
x[i] = v
for i in range(len(x)):
v = x[i]
x[i] += h
for j in range(i):
w = x[j]
x[j] += h
fij = f(x)
H[i,j] = (fij-fs[j]) - (fs[i]-f0)
H[j,i] = H[i,j]
x[j] = w
x[i] = v
x[i] -= h
fij = f(x)
H[i,i] = (fs[i]-f0) - (f0-fij)
x[i] = v
H *= 1.0/h**2
return H
def hessian2_forward_difference(f,x,y,h):
"""Approximation of the hessian of a 2-parameter function f(x,y) w.r.t. x and y
using forward differences with step size h.
"""
H = np.zeros((len(x),len(y)))
f0 = f(x,y)
fxs = []
fys = []
for i in range(len(x)):
v = x[i]
x[i] += h
fxs.append(f(x,y))
x[i] = v
for i in range(len(y)):
v = y[i]
y[i] += h
fys.append(f(x,y))
y[i] = v
for i in range(len(x)):
v = x[i]
x[i] += h
for j in range(len(y)):
w = y[j]
y[j] += h
fij = f(x,y)
H[i,j] = ((fij-fys[j]) - (fxs[i]-f0))
y[j] = w
x[i] = v
H *= 1.0/h**2
return H
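# A quick sanity check added as a sketch (not in the original module): for
# f(x) = x0**2 + 3*x0*x1 the exact gradient at (1, 2) is (8, 3) and the exact
# Hessian is [[2, 3], [3, 0]]; the forward-difference estimates should agree to
# within O(h) for the gradient and, for this quadratic, almost exactly for the Hessian.
if __name__ == "__main__":
    def f(x):
        return x[0]**2 + 3.0*x[0]*x[1]
    x0 = np.array([1.0, 2.0])
    print(gradient_forward_difference(f, x0, 1e-6))  # ~ [8., 3.]
    print(hessian_forward_difference(f, x0, 1e-4))   # ~ [[2., 3.], [3., 0.]]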
|
StarcoderdataPython
|
12803872
|
def client(api_key, api_url=None, version=None, **kwargs):
from client import QencodeApiClient
return QencodeApiClient(api_key, api_url=api_url, version=version, **kwargs)
def custom_params():
from custom_params import CustomTranscodingParams
return CustomTranscodingParams()
def format():
from custom_params import Format
return Format()
def destination():
from custom_params import Destination
return Destination()
def stream():
from custom_params import Stream
return Stream()
def x264_video_codec():
from custom_params import Libx264_VideoCodecParameters
return Libx264_VideoCodecParameters()
def x265_video_codec():
from custom_params import Libx265_VideoCodecParameters
return Libx265_VideoCodecParameters()
from exeptions import QencodeClientException, QencodeTaskException
__version__ = "0.9.29"
__status__ = "Beta"
__author__ = "Qencode"
|
StarcoderdataPython
|
8089913
|
<filename>aliyun-api-gateway-demo-sign/ClientDemo.py
# -*- coding: utf-8 -*-
from com.aliyun.api.gateway.sdk import client
from com.aliyun.api.gateway.sdk.http import request
from com.aliyun.api.gateway.sdk.common import constant
# This is the Umeng one-click login API endpoint
host = "https://verify5.market.alicloudapi.com"
url = "/api/v1/mobile/info?appkey=xxx"
cli = client.DefaultClient(app_key="appKey", app_secret="appSecret")
# GET
# req = request.Request(host=host,protocol=constant.HTTP, url=url, method="GET", time_out=30000)
# print cli.execute(req)
#post body stream
import json
req_post = request.Request(host=host, protocol=constant.HTTP, url=url, method="POST", time_out=30000)
body = {}
body["token"] = "<PASSWORD>"
req_post.set_body(json.dumps(body))
req_post.set_content_type(constant.CONTENT_TYPE_STREAM)
_, _, data = cli.execute(req_post)
print(json.loads(data))
#post form
# req_post = request.Request(host=host, protocol=constant.HTTP, url=url, method="POST", time_out=30000)
# bodyMap = {}
# bodyMap["bodyForm1"] = "fwefwef"
# bodyMap["bodyForm2"] = "ffwefwef"
# req_post.set_body(bodyMap)
# req_post.set_content_type(constant.CONTENT_TYPE_FORM)
# print cli.execute(req_post)
|
StarcoderdataPython
|
79573
|
#
# Copyright (c) 2018 Via Technology Ltd. All Rights Reserved.
# Consult your license regarding permissions and restrictions.
#
"""
operations related to airspaces and intersections.
"""
from psycopg2 import Error, InternalError
from psycopg2.extensions import AsIs
from psycopg2.extras import DictCursor
from itertools import filterfalse
from functools import reduce
from shapely.wkt import loads
import pru.db.context as ctx
from pru.logger import logger
log = logger(__name__)
def make_point(lon, lat, connection):
"""
Makes a geo point
"""
cursor = connection.cursor()
query = "SELECT ST_MakePoint(%s, %s)"
params = (float(lon), float(lat))
cursor.execute(query, params)
return cursor.fetchone()
def make_augmented_point_from_position(position, flight_id, connection):
"""
    Takes a (latitude, longitude) position tuple and makes an augmented point.
"""
point = make_point(position[1], position[0], connection)
return {'flight_id': flight_id, 'lon': position[1], 'lat': position[0],
'geoPoint': point}
def make_augmented_points_from_positions(latitudes, longitudes, flight_id, connection):
"""
Takes a list of latitudes and a list of longitudes and a flight_id.
Makes a list of augmented points.
"""
return [make_augmented_point_from_position(position, flight_id, connection) for position in zip(latitudes, longitudes)]
def extract_point_list_from_augmented_points(augmented_points):
"""
Given a list or generator of augmented points extract the geo point
representation as a list.
"""
return list(map(lambda augmented_points: augmented_points['geoPoint'],
augmented_points))
def make_line_from_augmented_points(augmented_points, flight_id, connection):
"""
Given a list of augmented points create a geographic line.
"""
if (len(augmented_points) == 0):
log.warning(f"Creating a line from a list of points but the list "
"was empty for flight id {flight_id}.")
return [[]]
cursor = connection.cursor()
query = "SELECT ST_AsEWKT(ST_MakeLine(ARRAY[%s]));"
params = [augmented_points]
cursor.execute(query, params)
return cursor.fetchone()
def find_sectors_intersected_by(line_string, flight_id, min_altitude, max_altitude, context, connection):
"""
Lists the airspace ids and details of those airspaces where the
given line string intersects excluding those that are outside of the range of
altitudes of the trajectory.
"""
log.debug(f"Finding trajectory intersection with airspaces for flight id: {flight_id}")
schema_name = context[ctx.SCHEMA_NAME]
try:
with connection.cursor() as cursor:
query = "SELECT id, av_airspace_id, min_altitude, max_altitude " \
"from %s.sectors where " \
"NOT (max_altitude < %s OR min_altitude > %s) AND " \
"ST_Intersects(wkt, ST_GeographyFromText('SRID=4326;%s'));"
params = [AsIs(schema_name), min_altitude, max_altitude, AsIs(line_string)]
cursor.execute(query, params)
return cursor.fetchall()
except InternalError:
log.exception(f"Failed whist trying to find the intersection between "
"a route with flight id {flight_id} and the airspace model.")
return []
def find_user_sectors_intersected_by(line_string, flight_id, min_altitude, max_altitude, context, connection):
"""
Lists the user defined airspace uids and details of those airspaces where the
given line string intersects.
"""
log.debug(f"Finding trajectory intersection with user defined airspaces for flight id: {flight_id}")
schema_name = context[ctx.SCHEMA_NAME]
try:
with connection.cursor() as cursor:
query = "SELECT id, org_id, min_altitude, max_altitude, user_id, " \
"sector_name from %s.user_defined_sectors where " \
"NOT (max_altitude < %s OR min_altitude > %s) AND " \
"ST_Intersects(wkt, ST_GeographyFromText('SRID=4326;%s'));"
params = [AsIs(schema_name), min_altitude, max_altitude, AsIs(line_string)]
cursor.execute(query, params)
return cursor.fetchall()
except InternalError:
log.exception(f"Failed whist trying to find the intersection between "
"a route with flight id {flight_id} and the airspace model.")
return []
def make_geographic_trajectory(augmented_points, flight_id, connection):
"""
Given a list of augmented points create a geographic line segment.
"""
log.debug(f"Making geo trajectory for flight id: {flight_id}")
return make_line_from_augmented_points(
extract_point_list_from_augmented_points(augmented_points),
flight_id,
connection)[0]
def make_augmented_trajectory(augmented_points, geographic_trajectory, flight_id, min_altitude, max_altitude, connection, is_user_defined=False):
"""
Makes a trajectory augmented with geographic positions and a list of sectors
intersected by the trajectory excluding those that do not meet the altitude range
of the trajectory.
"""
log.debug(f"Creating an augmented trajectory for flight id: {flight_id}")
if not is_user_defined:
sectors = find_sectors_intersected_by(geographic_trajectory, flight_id, min_altitude, max_altitude, ctx.CONTEXT, connection)
else:
sectors = find_user_sectors_intersected_by(geographic_trajectory, flight_id, min_altitude, max_altitude, ctx.CONTEXT, connection)
return {'extendedPoints': augmented_points,
'line': geographic_trajectory,
'sectors': sectors,
'is_user_defined': is_user_defined}
def find_sector(db_ID, connection):
schemaName = ctx.CONTEXT[ctx.SCHEMA_NAME]
with connection.cursor(cursor_factory=DictCursor) as cursor:
cursor.execute("SELECT id, av_airspace_id, av_icao_state_id, av_name, min_altitude, max_altitude FROM %s.sectors WHERE "
"id = %s",
[AsIs(schemaName), db_ID])
return cursor.fetchone()
def find_sector_identifiers(db_ID, context, connection):
"""
Finds the identifiers for a sector given the db id of the sector.
"""
schemaName = context[ctx.SCHEMA_NAME]
with connection.cursor(cursor_factory=DictCursor) as cursor:
cursor.execute("SELECT av_airspace_id, av_icao_state_id, av_name FROM %s.sectors WHERE "
"id = %s",
[AsIs(schemaName), db_ID])
return cursor.fetchmany()
def find_airspace_by_database_ID(db_ID, context, connection, is_user_defined=False):
"""
    Finds an airspace with the given database id.
    Returns a list; the list may be empty.
"""
schemaName = context[ctx.SCHEMA_NAME]
with connection.cursor(cursor_factory=DictCursor) as cursor:
if is_user_defined:
cursor.execute("SELECT * FROM %s.user_defined_sectors WHERE "
"id = %s", [AsIs(schemaName), db_ID])
return cursor.fetchmany()
else:
cursor.execute("SELECT * FROM %s.sectors WHERE "
"id = %s", [AsIs(schemaName), db_ID])
return cursor.fetchmany()
def originates(first_point, polygon_string, flight_id, sector_id, connection):
"""
If the first point is inside the given sector we determine that the
trajectory originates in the sector.
    first_point is the wkb of the first point of the trajectory.
    Returns True if the trajectory originates in the sector.
"""
cursor = connection.cursor()
query = "SELECT ST_Intersects(%s::geography, %s::geography);"
params = [first_point, polygon_string]
cursor.execute(query, params)
originates = cursor.fetchone()[0]
if originates:
log.debug(f"Flight with id {flight_id} originates in sector {sector_id}")
return originates
def find_line_poly_intersection_without_boundary(lineString, polygonString, connection):
"""
Use the geo db to find the intersections between the linestring and the unbounded polygon string.
The polygon is assumed to _NOT_ have a boundary around it.
"""
query = "SELECT ST_AsText(ST_Intersection(%s::geography, ST_Force2D(ST_Boundary(%s))::geography));"
params = [lineString, polygonString]
try:
with connection.cursor() as cursor:
cursor.execute(query, params)
res = cursor.fetchall()
return {'segmentStrings': res,
'ploygonString': polygonString}
except Error:
log.exception("Failed to find intersection : Error")
return []
def find_line_poly_intersection_with_boundary(lineString, polygonString, connection):
"""
Use the geo db to find the intersections between the linestring and the bounded polygon string.
The polygon is assumed to already have a boundary around it.
"""
query = "SELECT unit.find_intersections(%s, %s)"
params = [lineString, polygonString]
try:
with connection.cursor() as cursor:
cursor.execute(query, params)
res = cursor.fetchall()
return {'segmentStrings': res,
'ploygonString': polygonString}
except Error:
log.exception("Failed to find intersection : Error")
return []
def find_intersections(augmented_trajectory, min_altitude, max_altitude, flight_id, connection):
"""
Finds the points on the trajectory that intersect with the sectors of the
the augmented trajectory.
"""
log.debug(f"Finding intersection for flight id {flight_id}")
first_point = augmented_trajectory['extendedPoints'][0]['geoPoint']
first_point_lon = augmented_trajectory['extendedPoints'][0]['lon']
first_point_lat = augmented_trajectory['extendedPoints'][0]['lat']
is_user_defined = augmented_trajectory['is_user_defined']
# Find each sector
sector_IDs = [sector[0] for sector in augmented_trajectory['sectors']]
log.debug("Found sector ids %s", str(sector_IDs))
sectors = [find_airspace_by_database_ID(str(sector_id),
ctx.CONTEXT,
connection, is_user_defined)[0] for sector_id in sector_IDs]
# Find the points of the trajectory where the trajectory intersects
# with each sector
if is_user_defined:
segments = [{'flight_id': flight_id,
'intersections': find_line_poly_intersection_with_boundary(augmented_trajectory['line'],
sector['bounded_sector'],
connection),
'origin': {'is_origin': originates(first_point, sector['wkt'], flight_id, sector['id'], connection),
'origin_lat': first_point_lat,
'origin_lon': first_point_lon},
'id': sector['id'],
'org_id': sector['org_id'],
'user_id': sector['user_id'],
'sector_name': sector['sector_name'],
'min_altitude': sector['min_altitude'],
'max_altitude': sector['max_altitude'],
'is_cylinder': sector['is_cylinder'],
'is_user_defined': is_user_defined} for sector in sectors]
else:
segments = [{'flight_id': flight_id,
'intersections': find_line_poly_intersection_with_boundary(augmented_trajectory['line'],
sector['bounded_sector'],
connection),
'origin': {'is_origin': originates(first_point, sector['wkt'], flight_id, sector['id'], connection),
'origin_lat': first_point_lat,
'origin_lon': first_point_lon},
'id': sector['id'],
'av_icao_state_id': sector['av_icao_state_id'],
'av_name': sector['av_name'],
'av_airspace_id': sector['av_airspace_id'],
'min_altitude': sector['min_altitude'],
'max_altitude': sector['max_altitude'],
'is_user_defined': is_user_defined} for sector in sectors]
return segments
def extract(sector_id, shape, flight_id):
"""
    Given a shapely shape find if we have a point or a multipoint.
For a point extract the y, x pair as a list of one tuple of sector_id,
latitude and longitude.
For a multipoint return a list of multiple tuples.
"""
if shape.geom_type == 'MultiPoint':
        return [(sector_id, p.y, p.x) for p in shape.geoms]  # .geoms also works on Shapely 2.x, where iterating a MultiPoint directly is removed
elif shape.geom_type == 'Point':
return [(sector_id, shape.y, shape.x)]
else:
log.debug("Unknown geom type : %s in flight id %s and sector_id %s, was %s, skipping", shape.geom_type, flight_id, sector_id, str(shape))
return []
def extract_details_from_intersection(sector_id, wkt, origin, flight_id):
"""
    Given an intersection wkt use shapely to create the point or multipoint
    object. Then extract the latitudes and longitudes from the (multi)point.
    Returns a list of tuples of sector_id, latitude and longitude.
"""
intersection_tuples = extract(sector_id, loads(wkt), flight_id)
if origin['is_origin']:
# If this sector is an origin sector, add in the lat lons at the start.
intersection_tuples = [(sector_id, origin['origin_lat'], origin['origin_lon'])] + intersection_tuples
return intersection_tuples
def make_sector_description(intersection, is_user_defined=False):
"""
Makes a text description of the sector from the intersection description
"""
if is_user_defined:
return f'{intersection["org_id"]}/{intersection["user_id"]}/{intersection["sector_name"]}'
else:
return f'{intersection["av_icao_state_id"]}/{intersection["av_name"]}/{intersection["id"]}/{intersection["av_airspace_id"]}'
def make_sector_identifier(intersection):
"""
Makes a text version of the database id in the given intersection
"""
return f'{intersection["id"]}'
def extract_intersection_wkts(intersections):
"""
Given a list of intersection dicts return a list of wkts with sector
descriptive text and the origin details as a tuple.
ie ("some-text-made-from-sector-ids", wkt, {is_origin:False, origin_lat:lat, origin_lon: lon})
"""
return [(make_sector_identifier(intersection),
intersection['intersections']['segmentStrings'][0][0], intersection['origin'])
for intersection in intersections]
def merge_l_t(l, lt):
"""
    Merge a list of tuples lt, each of three values, into three lists l.
    For example: [('a', 'b', 'c'), ('a', 'd', 'e')] ->
    [['b', 'd'], ['c', 'e'], ['a', 'a']]
"""
for t in lt:
l[0].append(t[1])
l[1].append(t[2])
l[2].append(t[0])
return l
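# Illustrative example (not part of the original module): merge_l_t is meant to be
# folded over the per-sector detail lists with functools.reduce, e.g.
#
#   reduce(merge_l_t,
#          [[('s1', 10.0, 20.0)], [('s2', 11.0, 21.0), ('s2', 12.0, 22.0)]],
#          [[], [], []])
#   -> [[10.0, 11.0, 12.0], [20.0, 21.0, 22.0], ['s1', 's2', 's2']]
#
# which is exactly how create_intersection_data_structure below uses it.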
def create_intersection_data_structure(intersections, flight_id):
"""
Given the intersection data structures create a response tuple.
"""
# The intersection wkts are tuples of the sector_id, the wkt and the origin
# status for the intersection.
intersection_wkts = extract_intersection_wkts(intersections)
intersection_details = [extract_details_from_intersection(*intersection_wkt, flight_id) for intersection_wkt in intersection_wkts]
x_y_sector_ids = reduce(merge_l_t, intersection_details, [[], [], []])
return x_y_sector_ids[0], x_y_sector_ids[1], x_y_sector_ids[2]
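# Illustrative end-to-end sketch (commented out; it needs a live PostGIS connection
# and an initialised pru context, and lats/lons/min_alt/max_alt are placeholders
# for the caller's data):
#
#   points = make_augmented_points_from_positions(lats, lons, flight_id, connection)
#   line = make_geographic_trajectory(points, flight_id, connection)
#   trajectory = make_augmented_trajectory(points, line, flight_id, min_alt, max_alt, connection)
#   intersections = find_intersections(trajectory, min_alt, max_alt, flight_id, connection)
#   lats_out, lons_out, sector_ids = create_intersection_data_structure(intersections, flight_id)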
|
StarcoderdataPython
|
11205547
|
<filename>setup.py
from importlib import import_module
from pathlib import Path
from setuptools import setup, find_packages
SRC_ROOT = 'src'
BIN_ROOT = 'bin/'
about = import_module(SRC_ROOT + '.rhasspy_desktop_satellite.about')
with Path('README.md').open('r') as fh:
long_description = fh.read()
with Path('requirements.txt').open('r') as fh:
requirements = fh.read().splitlines()
requirements = [requirement for requirement in requirements
if not requirement.startswith('#')]
binaries = [BIN_ROOT + about.PROJECT]
setup(
name=about.PROJECT,
version=about.VERSION,
description=about.DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
license=about.LICENSE,
author=about.AUTHOR,
author_email=about.EMAIL,
url=about.GITHUB_URL,
project_urls={
'Documentation': about.DOC_URL,
'Source': about.GITHUB_URL,
'Tracker': about.TRACKER_URL,
},
packages=find_packages(SRC_ROOT),
package_dir={'': SRC_ROOT},
install_requires=requirements,
python_requires='>=3',
include_package_data=True,
zip_safe=False,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Home Automation',
'Topic :: Multimedia :: Sound/Audio :: Capture/Recording',
'Topic :: Multimedia :: Sound/Audio :: Players'
],
keywords=about.KEYWORDS,
scripts=binaries)
|
StarcoderdataPython
|
267033
|
<reponame>hafeez3000/wnframework<gh_stars>1-10
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import webnotes
def get_workflow_name(doctype):
if getattr(webnotes.local, "workflow_names", None) is None:
webnotes.local.workflow_names = {}
if doctype not in webnotes.local.workflow_names:
workflow_name = webnotes.conn.get_value("Workflow", {"document_type": doctype,
"is_active": "1"}, "name")
# no active? get default workflow
if not workflow_name:
workflow_name = webnotes.conn.get_value("Workflow", {"document_type": doctype},
"name")
webnotes.local.workflow_names[doctype] = workflow_name
return webnotes.local.workflow_names[doctype]
def get_default_state(doctype):
workflow_name = get_workflow_name(doctype)
return webnotes.conn.get_value("Workflow Document State", {"parent": workflow_name,
"idx":1}, "state")
def get_state_fieldname(doctype):
workflow_name = get_workflow_name(doctype)
return webnotes.conn.get_value("Workflow", workflow_name, "workflow_state_field")
|
StarcoderdataPython
|
5155131
|
<reponame>ishine/neural_sp
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""Test for RNN encoder."""
import importlib
import math
import numpy as np
import pytest
import torch
from neural_sp.models.torch_utils import (
np2tensor,
pad_list
)
def make_args(**kwargs):
args = dict(
input_dim=80,
enc_type='blstm',
n_units=16,
n_projs=0,
last_proj_dim=0,
n_layers=4,
n_layers_sub1=0,
n_layers_sub2=0,
dropout_in=0.1,
dropout=0.1,
subsample="1_1_1_1",
subsample_type='drop',
n_stacks=1,
n_splices=1,
frontend_conv=None,
bidir_sum_fwd_bwd=False,
task_specific_layer=False,
param_init=0.1,
chunk_size_current="0",
chunk_size_right="0",
cnn_lookahead=True,
rsp_prob=0,
)
args.update(kwargs)
return args
def make_args_conv(**kwargs):
args = dict(
input_dim=80,
in_channel=1,
channels="32_32",
kernel_sizes="(3,3)_(3,3)",
strides="(1,1)_(1,1)",
poolings="(2,2)_(2,2)",
dropout=0.1,
normalization='',
residual=False,
bottleneck_dim=0,
param_init=0.1,
)
args.update(kwargs)
return args
@pytest.mark.parametrize(
"args, args_conv",
[
# RNN type
({'enc_type': 'blstm'}, {}),
({'enc_type': 'lstm'}, {}),
({'enc_type': 'lstm', 'rsp_prob': 0.5}, {}),
# 2dCNN-RNN
({'enc_type': 'conv_blstm'}, {}),
({'enc_type': 'conv_blstm', 'input_dim': 240}, {'input_dim': 240, 'in_channel': 3}),
# 1dCNN-RNN
({'enc_type': 'conv_blstm'}, {'kernel_sizes': "3_3", 'strides': "1_1", 'poolings': "2_2"}),
({'enc_type': 'conv_blstm', 'input_dim': 240},
{'input_dim': 240, 'in_channel': 3, 'kernel_sizes': "3_3", 'strides': "1_1", 'poolings': "2_2"}),
# normalization
({'enc_type': 'conv_blstm'}, {'normalization': 'batch_norm'}),
({'enc_type': 'conv_blstm'}, {'normalization': 'layer_norm'}),
# projection
({'enc_type': 'blstm', 'n_projs': 8}, {}),
({'enc_type': 'lstm', 'n_projs': 8}, {}),
({'enc_type': 'blstm', 'bidir_sum_fwd_bwd': True}, {}),
({'enc_type': 'blstm', 'bidir_sum_fwd_bwd': True, 'n_projs': 8}, {}),
({'enc_type': 'blstm', 'last_proj_dim': 5}, {}),
({'enc_type': 'blstm', 'last_proj_dim': 5, 'n_projs': 8}, {}),
({'enc_type': 'lstm', 'last_proj_dim': 5, 'n_projs': 8}, {}),
({'enc_type': 'blstm', 'bidir_sum_fwd_bwd': True, 'last_proj_dim': 5}, {}),
({'enc_type': 'blstm', 'bidir_sum_fwd_bwd': True, 'last_proj_dim': 5, 'n_projs': 8}, {}),
# subsampling
({'enc_type': 'blstm', 'subsample': "1_2_2_1", 'subsample_type': 'drop'}, {}),
({'enc_type': 'blstm', 'subsample': "1_2_2_1", 'subsample_type': 'concat'}, {}),
({'enc_type': 'blstm', 'subsample': "1_2_2_1", 'subsample_type': 'max_pool'}, {}),
({'enc_type': 'blstm', 'subsample': "1_2_2_1", 'subsample_type': 'conv1d'}, {}),
({'enc_type': 'blstm', 'subsample': "1_2_2_1", 'subsample_type': 'add'}, {}),
({'enc_type': 'blstm', 'subsample': "1_2_2_1", 'subsample_type': 'drop',
'bidir_sum_fwd_bwd': True}, {}),
({'enc_type': 'blstm', 'subsample': "1_2_2_1", 'subsample_type': 'concat',
'bidir_sum_fwd_bwd': True}, {}),
({'enc_type': 'blstm', 'subsample': "1_2_2_1", 'subsample_type': 'max_pool',
'bidir_sum_fwd_bwd': True}, {}),
({'enc_type': 'blstm', 'subsample': "1_2_2_1", 'subsample_type': 'conv1d',
'bidir_sum_fwd_bwd': True}, {}),
({'enc_type': 'blstm', 'subsample': "1_2_2_1", 'subsample_type': 'add',
'bidir_sum_fwd_bwd': True}, {}),
({'enc_type': 'blstm', 'subsample': "1_2_2_1", 'subsample_type': 'drop',
'n_projs': 8}, {}),
({'enc_type': 'blstm', 'subsample': "1_2_2_1", 'subsample_type': 'concat',
'n_projs': 8}, {}),
({'enc_type': 'blstm', 'subsample': "1_2_2_1", 'subsample_type': 'max_pool',
'n_projs': 8}, {}),
({'enc_type': 'blstm', 'subsample': "1_2_2_1", 'subsample_type': 'conv1d',
'n_projs': 8}, {}),
({'enc_type': 'blstm', 'subsample': "1_2_2_1", 'subsample_type': 'add',
'n_projs': 8}, {}),
# LC-BLSTM
({'enc_type': 'blstm', 'chunk_size_current': "0", 'chunk_size_right': "40"}, {}), # BLSTM for PT
({'enc_type': 'blstm', 'chunk_size_current': "40", 'chunk_size_right': "40"}, {}),
({'enc_type': 'blstm', 'bidir_sum_fwd_bwd': True,
'chunk_size_current': "0", 'chunk_size_right': "40"}, {}), # BLSTM for PT
({'enc_type': 'blstm', 'bidir_sum_fwd_bwd': True,
'chunk_size_current': "40", 'chunk_size_right': "40"}, {}),
({'enc_type': 'conv_blstm', 'bidir_sum_fwd_bwd': True,
'chunk_size_current': "40", 'chunk_size_right': "40"}, {}),
({'enc_type': 'conv_blstm', 'bidir_sum_fwd_bwd': True,
'chunk_size_current': "40", 'chunk_size_right': "40", 'rsp_prob': 0.5}, {}),
# LC-BLSTM + subsampling
({'enc_type': 'blstm', 'subsample': "1_2_1_1",
'chunk_size_right': "40"}, {}), # BLSTM for PT
({'enc_type': 'blstm', 'subsample': "1_2_1_1",
'chunk_size_current': "40", 'chunk_size_right': "40"}, {}),
({'enc_type': 'blstm', 'subsample': "1_2_1_1", 'bidir_sum_fwd_bwd': True,
'chunk_size_right': "40"}, {}), # BLSTM for PT
({'enc_type': 'blstm', 'subsample': "1_2_1_1", 'bidir_sum_fwd_bwd': True,
'chunk_size_current': "40", 'chunk_size_right': "40"}, {}),
({'enc_type': 'blstm', 'subsample': "1_2_1_1", 'bidir_sum_fwd_bwd': True,
'chunk_size_current': "40", 'chunk_size_right': "40", 'rsp_prob': 0.5}, {}),
# Multi-task
({'enc_type': 'blstm', 'n_layers_sub1': 2}, {}),
({'enc_type': 'blstm', 'n_layers_sub1': 2, 'task_specific_layer': True}, {}),
({'enc_type': 'blstm', 'n_layers_sub1': 2, 'task_specific_layer': True,
'chunk_size_current': "0", 'chunk_size_right': "40"}, {}), # BLSTM for PT
({'enc_type': 'blstm', 'n_layers_sub1': 2, 'task_specific_layer': True,
'chunk_size_current': "40", 'chunk_size_right': "40"}, {}), # LC-BLSTM
({'enc_type': 'blstm', 'n_layers_sub1': 2, 'task_specific_layer': True,
'chunk_size_current': "0", 'chunk_size_right': "40",
'rsp_prob': 0.5}, {}), # BLSTM for PT
({'enc_type': 'blstm', 'n_layers_sub1': 2, 'task_specific_layer': True,
'chunk_size_current': "40", 'chunk_size_right': "40",
'rsp_prob': 0.5}, {}), # LC-BLSTM
({'enc_type': 'blstm', 'n_layers_sub1': 2, 'n_layers_sub2': 1}, {}),
({'enc_type': 'blstm', 'n_layers_sub1': 2, 'n_layers_sub2': 1,
'task_specific_layer': True}, {}),
# Multi-task + subsampling
({'enc_type': 'blstm', 'subsample': "2_1_1_1", 'n_layers_sub1': 2,
'chunk_size_current': "0", 'chunk_size_right': "40",
'task_specific_layer': True}, {}), # BLSTM for PT
({'enc_type': 'blstm', 'subsample': "2_1_1_1", 'n_layers_sub1': 2,
'chunk_size_current': "40", 'chunk_size_right': "40",
'task_specific_layer': True}, {}), # LC-BLSTM
]
)
def test_forward(args, args_conv):
device = "cpu"
args = make_args(**args)
if 'conv' in args['enc_type']:
conv_module = importlib.import_module('neural_sp.models.seq2seq.encoders.conv')
args_conv = make_args_conv(**args_conv)
args['frontend_conv'] = conv_module.ConvEncoder(**args_conv).to(device)
bs = 4
xmaxs = [40, 45] if int(args['chunk_size_current'].split('_')[0]) == -1 else [400, 455]
module = importlib.import_module('neural_sp.models.seq2seq.encoders.rnn')
enc = module.RNNEncoder(**args).to(device)
for xmax in xmaxs:
xs = np.random.randn(bs, xmax, args['input_dim']).astype(np.float32)
xlens = torch.IntTensor([len(x) - i * enc.subsampling_factor for i, x in enumerate(xs)])
# shuffle
perm_ids = torch.randperm(bs)
xs = xs[perm_ids]
xlens = xlens[perm_ids]
xs = pad_list([np2tensor(x, device).float() for x in xs], 0.)
eout_dict = enc(xs, xlens, task='all')
assert eout_dict['ys']['xs'].size(0) == bs
assert eout_dict['ys']['xs'].size(1) == eout_dict['ys']['xlens'].max()
for b in range(bs):
if 'conv' in args['enc_type'] or args['subsample_type'] in ['max_pool', 'conv1d', 'drop', 'add']:
assert eout_dict['ys']['xlens'][b].item() == math.ceil(xlens[b].item() / enc.subsampling_factor)
else:
assert eout_dict['ys']['xlens'][b].item() == xlens[b].item() // enc.subsampling_factor
if args['n_layers_sub1'] > 0:
# all outputs
assert eout_dict['ys_sub1']['xs'].size(0) == bs
assert eout_dict['ys_sub1']['xs'].size(1) == eout_dict['ys_sub1']['xlens'].max()
for b in range(bs):
if 'conv' in args['enc_type'] or args['subsample_type'] in ['max_pool', 'conv1d', 'drop', 'add']:
assert eout_dict['ys_sub1']['xlens'][b].item() == math.ceil(
xlens[b].item() / enc.subsampling_factor_sub1)
else:
assert eout_dict['ys_sub1']['xlens'][b].item() == xlens[b].item() // enc.subsampling_factor_sub1
# single output
eout_dict_sub1 = enc(xs, xlens, task='ys_sub1')
assert eout_dict_sub1['ys_sub1']['xs'].size(0) == bs
assert eout_dict_sub1['ys_sub1']['xs'].size(1) == eout_dict['ys_sub1']['xlens'].max()
if args['n_layers_sub2'] > 0:
# all outputs
assert eout_dict['ys_sub2']['xs'].size(0) == bs
assert eout_dict['ys_sub2']['xs'].size(1) == eout_dict['ys_sub2']['xlens'].max()
for b in range(bs):
if 'conv' in args['enc_type'] or args['subsample_type'] in ['max_pool', 'conv1d', 'drop', 'add']:
assert eout_dict['ys_sub2']['xlens'][b].item() == math.ceil(
xlens[b].item() / enc.subsampling_factor_sub2)
else:
assert eout_dict['ys_sub2']['xlens'][b].item() == xlens[b].item() // enc.subsampling_factor_sub2
# single output
eout_dict_sub2 = enc(xs, xlens, task='ys_sub2')
assert eout_dict_sub2['ys_sub2']['xs'].size(0) == bs
assert eout_dict_sub2['ys_sub2']['xs'].size(1) == eout_dict_sub2['ys_sub2']['xlens'].max()
|
StarcoderdataPython
|
5154182
|
import os
from dataclasses import dataclass
import numpy as np
import pandas as pd
__all__ = ["Test1", "Test2", "assets", "scenarios", "outlook"]
assets = 'DMEQ', 'EMEQ', 'PE', 'RE', 'NB', 'EILB', 'CASH'
scenarios = 'Baseline', 'Goldilocks', 'Stagflation', 'HHT'
outlook = pd.read_csv(os.path.join(os.path.dirname(__file__), "scenario.csv"))
@dataclass
class Bounds:
DMEQ: float
EMEQ: float
PE: float
RE: float
NB: float
EILB: float
CASH: float
def as_array(self):
return [self.DMEQ, self.EMEQ, self.PE, self.RE, self.NB, self.EILB, self.CASH]
@dataclass
class CVAR:
Baseline: float
Goldilocks: float
Stagflation: float
HHT: float
def as_array(self):
return np.array([self.Baseline, self.Goldilocks, self.Stagflation, self.HHT])
def __getitem__(self, item: str):
return self.__dict__[item]
@dataclass
class Probability(CVAR):
pass
class Weights(Bounds):
pass
class OptimalMix(CVAR):
pass
@dataclass
class Expected:
Mix: OptimalMix
Baseline: Weights
Goldilocks: Weights
Stagflation: Weights
HHT: Weights
Optimal: Weights
@dataclass
class RegretTest:
lb: Bounds
ub: Bounds
cvar: CVAR
prob: Probability
expected: Expected
@property
def solutions(self):
return np.array([
self.expected.Baseline.as_array(),
self.expected.Goldilocks.as_array(),
self.expected.Stagflation.as_array(),
self.expected.HHT.as_array(),
])
@property
def optimal(self):
return self.expected.Optimal.as_array()
@property
def proportions(self):
return self.expected.Mix.as_array()
Test1 = RegretTest(
lb=Bounds(
DMEQ=0,
EMEQ=0,
PE=0.13,
RE=0.11,
NB=0,
EILB=0.05,
CASH=0.04,
),
ub=Bounds(
DMEQ=1,
EMEQ=0.18,
PE=0.13,
RE=0.11,
NB=1,
EILB=0.05,
CASH=0.04,
),
prob=Probability(
Baseline=0.57,
Goldilocks=0.1,
Stagflation=0.14,
HHT=0.19,
),
cvar=CVAR(
Baseline=-0.34,
Goldilocks=-0.253,
Stagflation=-0.501,
HHT=-0.562
),
expected=Expected(
Mix=OptimalMix(
Baseline=0.732,
Goldilocks=0,
Stagflation=0,
HHT=0.268
),
Baseline=Weights(
DMEQ=0.25,
EMEQ=0.18,
PE=0.13,
RE=0.11,
NB=0.24,
EILB=0.05,
CASH=0.04,
),
Goldilocks=Weights(
DMEQ=0.3485,
EMEQ=0.0947,
PE=0.13,
RE=0.11,
NB=0.2268,
EILB=0.05,
CASH=0.04,
),
Stagflation=Weights(
DMEQ=0.1,
EMEQ=0,
PE=0.13,
RE=0.11,
NB=0.57,
EILB=0.05,
CASH=0.04,
),
HHT=Weights(
DMEQ=0,
EMEQ=0,
PE=0.13,
RE=0.11,
NB=0.67,
EILB=0.05,
CASH=0.04,
),
Optimal=Weights(
DMEQ=0.183,
EMEQ=0.132,
PE=0.13,
RE=0.11,
NB=0.355,
EILB=0.05,
CASH=0.04,
),
)
)
Test2 = RegretTest(
lb=Bounds(
DMEQ=0,
EMEQ=0,
PE=0,
RE=0,
NB=0,
EILB=0,
CASH=0,
),
ub=Bounds(
DMEQ=1,
EMEQ=0.18,
PE=0.13,
RE=0.11,
NB=1,
EILB=0.05,
CASH=0.04,
),
prob=Probability(
Baseline=0.57,
Goldilocks=0.1,
Stagflation=0.14,
HHT=0.19,
),
cvar=CVAR(
Baseline=-0.34,
Goldilocks=-0.253,
Stagflation=-0.501,
HHT=-0.562
),
expected=Expected(
Mix=OptimalMix(
Baseline=0.765,
Goldilocks=0,
Stagflation=0,
HHT=0.235
),
Baseline=Weights(
DMEQ=0.25,
EMEQ=0.18,
PE=0.13,
RE=0.11,
NB=0.24,
EILB=0.05,
CASH=0.04,
),
Goldilocks=Weights(
DMEQ=0.357,
EMEQ=0.099,
PE=0.13,
RE=0.11,
NB=0.264,
EILB=0,
CASH=0.04,
),
Stagflation=Weights(
DMEQ=0.1,
EMEQ=0,
PE=0.13,
RE=0.11,
NB=0.57,
EILB=0.05,
CASH=0.04,
),
HHT=Weights(
DMEQ=0,
EMEQ=0,
PE=0,
RE=0,
NB=0.95,
EILB=0.05,
CASH=0,
),
Optimal=Weights(
DMEQ=0.191,
EMEQ=0.138,
PE=0.1,
RE=0.084,
NB=0.407,
EILB=0.05,
CASH=0.031,
),
)
)
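# Illustrative check (commented out): the properties above just repackage the fixture
# data for the consuming tests, e.g.
#
#   Test1.solutions.shape   # -> (4, 7): one weight vector per scenario
#   Test1.optimal           # -> the regret-optimal weights as a list of 7 floats
#   Test1.proportions       # -> array([0.732, 0.   , 0.   , 0.268]), the scenario mix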
|
StarcoderdataPython
|
6699883
|
##@package layer
#@author <NAME>
## Layer of the system.
# List the agents belonging to one layer.
class Layer:
## Total number of layers.
_layersNumber=0
## Default constructor.
# @param agentList Initial list of agents in the layer.
# @param name Name of the layer.
    def __init__(self, agentList=None, name=""):
        # Use None as the default to avoid sharing one mutable list between layers.
        self._agentList = agentList if agentList is not None else []
        self._id = Layer._layersNumber
        Layer._layersNumber += 1
        self.rename(name)
## Perform the action of all agents in the layer.
# The data are modified by the actions of the agent.
# @param data Data needed for the action.
def act(self, data):
for a in self._agentList:
a.act(data,self)
## Add an agent to the agent list.
# @param agent Agent to add.
    def addAgent(self, agent):
        if agent not in self._agentList:
            self._agentList.append(agent)
## Get the list of agents in the layer.
# @return List of agents
def agentList(self):
return self._agentList
## Get the layer's id.
def id(self):
return self._id
## Remove an agent from the agent list.
# @param agent Agent to add.
    def removeAgent(self, agent):
        self._agentList.remove(agent)
## Rename the layer.
# @param name New name.
# If an empty name is given the new name is "Layer #id".
def rename(self,name=""):
if name=="":
self.name="Layer #"+str(self._id)
else:
self.name=name
def __str__(self):
return self.name
    ## @var _agentList
# List of agents in this layer.
## @var _id
# Unique identifier of the layer.
## @var name
# Layer name.
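# Illustrative usage sketch (not part of the original module; EchoAgent is a
# hypothetical agent type whose act(data, layer) method is what Layer.act expects):
#
#   class EchoAgent:
#       def act(self, data, layer):
#           print(layer, "saw", data)
#
#   layer = Layer(name="sensors")
#   layer.addAgent(EchoAgent())
#   layer.act({"temperature": 21.5})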
|
StarcoderdataPython
|
11358706
|
# Copyright 2018 GoDaddy
# Copyright (c) 2015 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from unittest import mock
from oslo_utils import uuidutils
from octavia.common import data_models
from octavia.statistics.drivers import update_db
from octavia.tests.unit import base
class TestStatsUpdateDb(base.TestCase):
def setUp(self):
super(TestStatsUpdateDb, self).setUp()
self.amphora_id = uuidutils.generate_uuid()
self.listener_id = uuidutils.generate_uuid()
@mock.patch('octavia.db.repositories.ListenerStatisticsRepository')
@mock.patch('octavia.db.api.get_session')
def test_update_stats(self, mock_get_session, mock_listener_stats_repo):
bytes_in1 = random.randrange(1000000000)
bytes_out1 = random.randrange(1000000000)
active_conns1 = random.randrange(1000000000)
total_conns1 = random.randrange(1000000000)
request_errors1 = random.randrange(1000000000)
stats_1 = data_models.ListenerStatistics(
listener_id=self.listener_id,
amphora_id=self.amphora_id,
bytes_in=bytes_in1,
bytes_out=bytes_out1,
active_connections=active_conns1,
total_connections=total_conns1,
request_errors=request_errors1
)
bytes_in2 = random.randrange(1000000000)
bytes_out2 = random.randrange(1000000000)
active_conns2 = random.randrange(1000000000)
total_conns2 = random.randrange(1000000000)
request_errors2 = random.randrange(1000000000)
stats_2 = data_models.ListenerStatistics(
listener_id=self.listener_id,
amphora_id=self.amphora_id,
bytes_in=bytes_in2,
bytes_out=bytes_out2,
active_connections=active_conns2,
total_connections=total_conns2,
request_errors=request_errors2
)
update_db.StatsUpdateDb().update_stats(
[stats_1, stats_2], deltas=False)
mock_listener_stats_repo().replace.assert_has_calls([
mock.call(mock_get_session(), stats_1),
mock.call(mock_get_session(), stats_2)
])
update_db.StatsUpdateDb().update_stats(
[stats_1, stats_2], deltas=True)
mock_listener_stats_repo().increment.assert_has_calls([
mock.call(mock_get_session(), stats_1),
mock.call(mock_get_session(), stats_2)
])
|
StarcoderdataPython
|
1681520
|
<reponame>albgar/aiida_siesta_plugin
#!/usr/bin/env runaiida
'''
This is an example of how to launch multiple SIESTA simulations iterating
over one or more parameters using the aiida_siesta plugin.
'''
#Not required by AiiDA
import os.path as op
import sys
#AiiDA classes and functions
from aiida.engine import submit
from aiida.orm import load_code
from aiida.orm import Float, Int, Str, Dict, StructureData, KpointsData
from aiida_pseudo.data.pseudo.psf import PsfData
from aiida_siesta.workflows.iterate import SiestaIterator
'''
First of all, we need to setup all the inputs of the calculations.
See https://aiida-siesta-plugin.readthedocs.io/en/stable/workflows/base.html#inputs.
Basically, we will build an "inputs" dict at the end of the file, and all we are doing until
there is to build every part of it step by step.
All this is general to any SIESTA calculation with AiiDa, so if you already know how it works,
go to the end of the file, where we really do the iterate specific stuff.
'''
# Load the version of siesta that we are going to use
try:
codename = sys.argv[1]
except IndexError:
codename = 'Siesta-4.0.2@kay'
code = load_code(codename)
# Generate the structure (or get it from somewhere else)
try:
# We can get it using sisl (useful if we have it in an *fdf, *XV, ...)
import sisl
ase_struct = sisl.geom.diamond(5.430, 'Si').toASE()
except ImportError:
# Or using ASE
import ase.build
ase_struct = ase.build.bulk('Si', 'diamond', 5.430)
# Then just pass it to StructureData, which is the type that Aiida works with
structure = StructureData(ase=ase_struct)
# Specify some parameters that go into the fdf file
parameters = Dict(
dict={
'xc-functional': 'LDA',
'xc-authors': 'CA',
'max-scfiterations': 40,
'dm-numberpulay': 4,
'dm-mixingweight': 0.3,
'dm-tolerance': 1.e-5,
'Solution-method': 'diagon',
'electronic-temperature': '25 meV',
})
# Extra parameters that also go to the fdf file, but are related
# to the basis.
basis = Dict(
dict={
'pao-energy-shift': '300 meV',
'%block pao-basis-sizes': """
Si DZP
%endblock pao-basis-sizes""",
})
# Define the kpoints for the simulations. Note that this is not passed as
# a normal fdf parameter, it has "its own input"
kpoints = KpointsData()
kpoints.set_kpoints_mesh([14, 14, 14])
# Get the appropriate pseudos (in "real life", one could have a pseudos family defined
# in aiida database with `verdi data psf uploadfamily <path to folder> <family name>`)
# and then pass it as a simple string, Aiida will know which pseudos to use.
# See the pseudo_family in the aiida_siesta docs (link on top of the file)
pseudos_dict = {}
raw_pseudos = [("Si.psf", ['Si'])]
for fname, kinds in raw_pseudos:
absname = op.realpath(op.join(op.dirname(__file__), "../fixtures/sample_psf", fname))
pseudo = PsfData.get_or_create(absname)
if not pseudo.is_stored:
print("\nCreated the pseudo for {}".format(kinds))
else:
print("\nUsing the pseudo for {} from DB: {}".format(kinds, pseudo.pk))
for j in kinds:
pseudos_dict[j]=pseudo
# Options that are related to how the job is technically submitted and
# run. Some of this options define flags for the job manager (e.g. SLURM)
# and some other's are related to how the code is executed. Note that
# 'max_wallclock_seconds' is a required option, so that SIESTA can stop
# gracefully before the job runs out of time.
options = Dict(
dict={
"max_wallclock_seconds": 360,
#'withmpi': True,
#'account': "<KEY>",
#'queue_name': "DevQ",
"resources": {
"num_machines": 1,
"num_mpiprocs_per_machine": 1,
}
})
# Now we have all inputs defined, so as promised at the beginning of the file
# we build the inputs dicts to pass it to the process. There's no need though,
# we could pass each input separately. This is just so that the process call
# looks cleaner.
inputs = {
'structure': structure,
'parameters': parameters,
'code': code,
'basis': basis,
'kpoints': kpoints,
'pseudos': pseudos_dict,
'options': options,
}
# Up until this point, all the things done have been general to any SIESTA
# simulation. Now, we will use the SiestaIterator workflow to launch SIESTA
# simulations iterating over parameters
# Iterate over meshcutoff
#process = submit(SiestaIterator, **inputs,
# iterate_over={
# 'meshcutoff': [100,200,300,400,500,600,700,800,900],
# },
# batch_size=Int(4)
#)
# Iterate over meshcutoff and energyshift at the same time
#process = submit(SiestaIterator, **inputs,
# iterate_over={
# 'meshcutoff': [100,200,300],
# 'pao-energyshift': [0.02, 0.01, 0.05]
# },
#)
# This will run three simulations with these values (meshcutoff, energyshift)
# (100, 0.02), (200, 0.01), (300, 0.05)
# But what if you want to try all the combinations?
# You can do so by setting the mode to "product"
process = submit(SiestaIterator, **inputs,
iterate_over={
'meshcutoff': [100,200,300],
'pao-energyshift': [0.02, 0.01, 0.05]
},
iterate_mode=Str('product'),
batch_size=Int(3) #This selects how many values run at the same time
)
# This will run nine simulations with these values (meshcutoff, energyshift)
# (100, 0.02), (100, 0.01), (300, 0.05), (200, 0.02), (200, 0.01) ...
# Print some info
print("Submitted workchain; ID={}".format(process.pk))
print(
"For information about this workchain type: verdi process show {}".format(
process.pk))
print("For a list of running processes type: verdi process list")
|
StarcoderdataPython
|
5091508
|
"""py.test configuration."""
import os
import tempfile
from pathlib import Path
import numpy as np
import nibabel as nb
import pytest
from dipy.data.fetcher import _make_fetcher, UW_RW_URL
_dipy_datadir_root = os.getenv("DMRIPREP_TESTS_DATA") or Path.home()
dipy_datadir = Path(_dipy_datadir_root) / ".cache" / "data"
dipy_datadir.mkdir(parents=True, exist_ok=True)
_make_fetcher(
"fetch_sherbrooke_3shell",
str(dipy_datadir),
UW_RW_URL + "1773/38475/",
["HARDI193.nii.gz", "HARDI193.bval", "HARDI193.bvec"],
["HARDI193.nii.gz", "HARDI193.bval", "HARDI193.bvec"],
[
"0b735e8f16695a37bfbd66aab136eb66",
"e9b9bb56252503ea49d31fb30a0ac637",
"0c83f7e8b917cd677ad58a078658ebb7",
],
doc="Download a 3shell HARDI dataset with 192 gradient direction",
)()
_sherbrooke_data = {
"dwi_file": dipy_datadir / "HARDI193.nii.gz",
"bvecs": np.loadtxt(dipy_datadir / "HARDI193.bvec").T,
"bvals": np.loadtxt(dipy_datadir / "HARDI193.bval"),
}
@pytest.fixture(autouse=True)
def doctest_autoimport(doctest_namespace):
"""Make available some fundamental modules to doctest modules."""
doctest_namespace["np"] = np
doctest_namespace["nb"] = nb
doctest_namespace["os"] = os
doctest_namespace["Path"] = Path
doctest_namespace["data_dir"] = Path(__file__).parent / "data" / "tests"
doctest_namespace["dipy_datadir"] = dipy_datadir
tmpdir = tempfile.TemporaryDirectory()
doctest_namespace["tmpdir"] = tmpdir.name
yield
tmpdir.cleanup()
@pytest.fixture(scope="session")
def dipy_test_data():
"""Create a temporal directory shared across tests to pull data in."""
return _sherbrooke_data
|
StarcoderdataPython
|
399915
|
<filename>alloy2vec/processing/data/clean_corpus.py
import os,re,sys
# initializing bad_chars_list
bad_chars = [',', ';', ':', '!', '.', '(', ')', '"', "*"]
#filename="mat2vec-1a5b3240-abstracts-head.csv"
filename=sys.argv[1] #"mat2vec-1a5b3240-abstracts.csv"
print(filename)
filename_w="cleaned_"+filename
with open( filename_w, 'a+') as corpus_w:
with open(filename,"r") as corpus:
data=corpus.readlines()
for line in data:
#words=line.split(',') #(" ")
#split_again=str(words[0]).split(".")
#year=split_again[3]
#if int(year)>2020 or int(year)<1900:
# year=split_again[2]
#filename_i=os.path.join("corpus_by_year",str(year)+".csv")
#words_j=''
#for j in range(1,len(words)):
# words_j +=words[j]
#words_jj=re.split('.|,|"|:', str(words_j))
#words_k=''
#for k in range(0,len(words_jj)):
# words_k += words_jj[k]
            # remove bad_chars (replace cumulatively; replacing from `line` on every
            # iteration would only strip the last character in bad_chars)
            words = line.rstrip('\n')
            for ii in bad_chars:
                words = words.replace(ii, '')
            corpus_w.write(words + '\n')
|
StarcoderdataPython
|
1879229
|
# from . import invoice_report
|
StarcoderdataPython
|
5140239
|
<reponame>kkelchte/pilot
#!/usr/bin/python
# Block all numpy-scipy incompatibility warnings (could be removed at following scipy update (>1.1))
import warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
import numpy as np
import os,sys, time
import argparse
import shutil
import subprocess, shlex
import json
import skimage.io as sio
import matplotlib.pyplot as plt
import ou_noise
class bcolors:
""" Colors to print in terminal with color!
"""
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
'''
augment_data.py:
In a factorized control the network should solely focus on the relationship between the obstacle in the foreground and the annotated control. Just to be sure that the factor does not put attention on the background, the background is filled with noise.
The experiments are done with two types of noise:
1. Uniform noise over all three channels
2. OU Noise in x and y direction for each channel
Runs are collected from .txt files
All runs are saved in destination with incrementing index and a set.txt file.
exit codes:
  2: not enough successful runs, so shutting down.
'''
print("\n {0} Augment data.py: started.".format(time.strftime("%Y-%m-%d_%I%M")))
# 1. Parse arguments and make settings
parser = argparse.ArgumentParser(description='Clean up dataset collected by a group of recordings that loop over different runs.')
parser.add_argument("--data_root", default="pilot_data/",type=str, help="Define the root folder of the different datasets.")
parser.add_argument("--mother_dir", default='', type=str, help="Define the mother dir in data_root with all runs (over rules endswith).")
parser.add_argument("--txt_set", default='', type=str, help="Define the run dirs with a train/val/test_set.txt.")
parser.add_argument("--destination", default='', type=str, help="Define the name of the final dataset.")
parser.add_argument("--noise_type", default='uni', type=str, help="Define how background of image is filled with noise: uni, ou")
parser.add_argument("--owr", action='store_true', help="If destination already exists, overwrite.")
parser.add_argument("--gray_min",default=170,type=int,help="The minimum gray value that channel should have to be put as background.")
parser.add_argument("--gray_max",default=180,type=int,help="The maximum gray value that channel should have to be put as background.")
FLAGS, others = parser.parse_known_args()
if FLAGS.data_root[0] != '/': # 2. Pilot_data directory for saving data
FLAGS.data_root=os.environ['HOME']+'/'+FLAGS.data_root
# get all runs
if len(FLAGS.txt_set) != 0:
runs=[r.strip() for r in open(FLAGS.data_root+FLAGS.mother_dir+'/'+FLAGS.txt_set,'r').readlines()]
else:
raise NotImplementedError("loop over different txt files not implemented yet, please provide --txt_set option")
# default destination is mother_dir + '_' + noise_type
if FLAGS.destination == '':
FLAGS.destination = FLAGS.data_root + FLAGS.mother_dir + '_' + FLAGS.noise_type
elif not FLAGS.destination.startswith('/'):
FLAGS.destination = FLAGS.data_root + FLAGS.destination
print("\nSettings:")
for f in FLAGS.__dict__: print("{0}: {1}".format( f, FLAGS.__dict__[f]))
# create destination
count_offset=0
if os.path.isdir(FLAGS.destination):
if FLAGS.owr:
shutil.rmtree(FLAGS.destination)
else:
count_offset = len([d for d in os.listdir(FLAGS.destination) if os.path.isdir(FLAGS.destination+'/'+d)])
print("Copy with offset as there were already {0} directories in {1}".format(count_offset, FLAGS.destination))
# raise NameError( 'Destination already exists, overwriting alert: '+ FLAGS.destination )
if not os.path.isdir(FLAGS.destination): os.makedirs(FLAGS.destination)
new_runs=[]
# for each run
for i,r in enumerate(runs):
print("{0}: {1}/{2} : {3}".format(time.strftime("%Y-%m-%d_%I%M"), i, len(runs), r))
# 1. copy run to destination
subprocess.call(shlex.split("cp -r {0} {1}/{2:05d}_{3}".format(r, FLAGS.destination, count_offset+i, os.path.basename(r).split('_')[1])))
# 2. mv RGB to RGB_old
subprocess.call(shlex.split("mv {1}/{2:05d}_{3}/RGB {1}/{2:05d}_{3}/RGB_old".format(r, FLAGS.destination, count_offset+i, os.path.basename(r).split('_')[1])))
# 3. create new RGB
os.makedirs("{0}/{1:05d}_{2}/RGB".format(FLAGS.destination, count_offset+i, os.path.basename(r).split('_')[1]))
# 4. for img in RGB_old
images=["{0}/{1:05d}_{2}/RGB_old/{3}".format(FLAGS.destination, count_offset+i, os.path.basename(r).split('_')[1],img) for img in os.listdir("{0}/{1:05d}_{2}/RGB_old".format(FLAGS.destination, count_offset+i, os.path.basename(r).split('_')[1]))]
for file_name in images:
# 4.1. read in image
img=sio.imread(file_name)
# 4.2. create mask
mask=np.zeros(img.shape) #initialize all negative
# go over each channel to filter vector wise
mask_0=mask[:,:,0]
mask_1=mask[:,:,1]
mask_2=mask[:,:,2]
img_0 = img[:,:,0]
img_1 = img[:,:,1]
img_2 = img[:,:,2]
for mask_i in [mask_0, mask_1, mask_2]:
for img_i in [img_0, img_1, img_2]:
mask_i[img_i<FLAGS.gray_min]=1
mask_i[img_i>FLAGS.gray_max]=1
# mask_0[img_0<FLAGS.gray_min]=1
# mask_0[img_0>FLAGS.gray_max]=1
# mask_1[img_1<FLAGS.gray_min]=1
# mask_1[img_1>FLAGS.gray_max]=1
# mask_2[img_2<FLAGS.gray_min]=1
# mask_2[img_2>FLAGS.gray_max]=1
# 4.3. create background and combine
if FLAGS.noise_type == 'uni':
background = np.random.randint(0,255+1,size=img.shape)
elif FLAGS.noise_type == 'ou':
theta=0.1
sigma=0.1
# create horizontal noise over the columns repeated over the rows
ou = ou_noise.OUNoise(3,0,theta,sigma)
horizontal_noise = []
for j in range(img.shape[1]):
horizontal_noise.append(np.asarray(256*ou.noise()+256/2.))
            horizontal_noise = np.repeat(np.expand_dims(np.asarray(horizontal_noise), axis=0),img.shape[0],axis=0)  # keep float here; casting to uint8 now would wrap around when summed below
# create vertical noise over the rows repeated over the columns
ou = ou_noise.OUNoise(3,0,theta,sigma)
vertical_noise = []
for j in range(img.shape[0]):
vertical_noise.append(np.asarray(256*ou.noise()+256/2.))
            vertical_noise = np.repeat(np.expand_dims(np.asarray(vertical_noise), axis=1),img.shape[1],axis=1)  # keep float here as well
# combine the two
            # combine the two (still float, so the sum cannot overflow)
            background = (horizontal_noise + vertical_noise)/2.
            # clip into the valid range and cast to uint8
            background = np.clip(background, 0, 255).astype(np.uint8)
else:
raise NotImplementedError("Type of noise is unknown:{0}".format(FLAGS.noise_type))
# 4.4. combine in new image
inv_mask=np.abs(mask-1)
combined=np.multiply(mask,img)+np.multiply(inv_mask,background)
# 4.5. save the image away
plt.imsave("{0}/{1:05d}_{2}/RGB/{3}".format(FLAGS.destination, count_offset+i, os.path.basename(r).split('_')[1], os.path.basename(file_name)),combined.astype(np.uint8))
# 5. append runs to set.txt
new_runs.append("{0}/{1:05d}_{2} \n".format(FLAGS.destination, count_offset+i, os.path.basename(r).split('_')[1]))
# 6. remove RGB_old
subprocess.call(shlex.split("rm -r {0}/{1:05d}_{2}/RGB_old".format(FLAGS.destination, count_offset+i, os.path.basename(r).split('_')[1])))
with open("{0}/{1}".format(FLAGS.destination, FLAGS.txt_set),'w') as new_set:
for l in new_runs: new_set.write(l)
print("\n {0} Augment data.py: finished.".format(time.strftime("%Y-%m-%d_%I%M")))
|
StarcoderdataPython
|
6573472
|
<reponame>ScSteffen/neuralEntropyComparison
'''
This is the script that gets called from the C++ KiT-RT method MLOptimizer.cpp
It initializes and loads a neural Closure
The call method performs a prediction
Author: <NAME>
Version: 0.0
Date 29.10.2020
'''
### imports ###
from src.neuralClosures.configModel import initNeuralClosure
import numpy as np
import tensorflow as tf
import os
import pandas as pd
from optparse import OptionParser
### global variable ###
neuralClosureModel = 0 # bm.initNeuralClosure(0,0)
### function definitions ###
def initModelCpp(input):
'''
    input: string array consisting of [modelNumber, maxDegree_N]
           modelNumber : Defines the used network model, i.e. MK1, MK2...
           maxDegree_N : Defines the maximal degree of the moment basis, i.e. the "N" of "M_N"
           The folder containing the neural network model is derived below as
           "neuralClosure_M<maxDegree_N>_MK<modelNumber>".
'''
print("|-------------------- Tensorflow initialization Log ------------------")
print("|")
modelNumber = input[0]
maxDegree_N = input[1]
# --- Transcribe the modelNumber and MaxDegree to the correct model folder --- #
folderName = "neuralClosure_M" + str(maxDegree_N) + "_MK" + str(modelNumber)
global neuralClosureModel
neuralClosureModel = initNeuralClosure(modelNumber, maxDegree_N, folderName)
neuralClosureModel.loadModel()
neuralClosureModel.model.summary()
print("|")
print("| Tensorflow neural closure initialized.")
print("|")
return 0
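# Illustrative call as it would arrive from the C++ side (commented out; it assumes
# the trained model files under "neuralClosure_M2_MK1" already exist on disk):
#
#   initModelCpp(["1", "2"])   # MK1 network for an M_2 moment basis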
### function definitions ###
def initModel(modelNumber=1, polyDegree=0, spatialDim=3, folderName="testFolder", optimizer='adam', width=10, height=5):
'''
modelNumber : Defines the used network model, i.e. MK1, MK2...
maxDegree_N : Defines the maximal Degree of the moment basis, i.e. the "N" of "M_N"
'''
global neuralClosureModel
neuralClosureModel = initNeuralClosure(modelNumber, polyDegree, spatialDim, folderName, optimizer, width, height)
return 0
def callNetwork(input):
'''
# Input: input.shape = (nCells,nMaxMoment), nMaxMoment = 9 in case of MK3
# Output: Gradient of the network wrt input
'''
# predictions = neuralClosureModel.model.predict(input)
x_model = tf.Variable(input)
with tf.GradientTape() as tape:
# training=True is only needed if there are layers with different
# behavior during training versus inference (e.g. Dropout).
predictions = neuralClosureModel.model(x_model, training=False) # same as neuralClosureModel.model.predict(x)
gradients = tape.gradient(predictions, x_model)
return gradients
def callNetworkBatchwise(inputNetwork):
# Transform npArray to tfEagerTensor
x_model = tf.Variable(inputNetwork)
# Compute Autodiff tape
with tf.GradientTape() as tape:
# training=True is only needed if there are layers with different
# behavior during training versus inference (e.g. Dropout).
predictions = neuralClosureModel.model(x_model, training=False) # same as model.predict(x)
# Compute the gradients
gradients = tape.gradient(predictions, x_model)
# ---- Convert gradients from eagerTensor to numpy array and then to flattened c array ----
# Note: Use inputNetwork as array, since a newly generated npArray seems to cause a Segfault in cpp
(dimCell, dimBase) = inputNetwork.shape
for i in range(0, dimCell):
for j in range(0, dimBase):
inputNetwork[i, j] = gradients[i, j]
return inputNetwork
def main():
print("---------- Start Network Training Suite ------------")
print("Parsing options")
# --- parse options ---
parser = OptionParser()
parser.add_option("-b", "--batch", dest="batch", default=1000,
help="batch size", metavar="BATCH")
parser.add_option("-c", "--epochChunk", dest="epochchunk", default=1,
help="number of epoch chunks", metavar="EPOCHCHUNK")
parser.add_option("-d", "--degree", dest="degree", default=0,
help="max degree of moment", metavar="DEGREE")
parser.add_option("-e", "--epoch", dest="epoch", default=1000,
help="epoch count for neural network", metavar="EPOCH")
parser.add_option("-f", "--folder", dest="folder", default="testFolder",
help="folder where the model is stored", metavar="FOLDER")
parser.add_option("-l", "--loadModel", dest="loadmodel", default=1,
help="load model weights from file", metavar="LOADING")
parser.add_option("-m", "--model", dest="model", default=1,
help="choice of network model", metavar="MODEL")
parser.add_option("-n", "--normalized", dest="normalized", default=0,
help="train on normalized moments", metavar="NORMALIZED")
parser.add_option("-o", "--optimizer", dest="optimizer", default="Adam",
help="optimizer choice", metavar="OPTIMIZER")
parser.add_option("-p", "--processingmode", dest="processingmode", default=1,
help="gpu mode (1). cpu mode (0) ", metavar="PROCESSINGMODE")
parser.add_option("-s", "--spatialDimension", dest="spatialDimension", default=3,
help="spatial dimension of closure", metavar="SPATIALDIM")
parser.add_option("-t", "--training", dest="training", default=1,
help="training mode (1) execution mode (0)", metavar="TRAINING")
parser.add_option("-v", "--verbosity", dest="verbosity", default=1,
help="output verbosity keras (0 or 1)", metavar="VERBOSITY")
parser.add_option("-w", "--networkwidth", dest="networkwidth", default=10,
help="width of each network layer", metavar="WIDTH")
parser.add_option("-x", "--networkheight", dest="networkheight", default=5,
help="height of the network", metavar="HEIGHT")
(options, args) = parser.parse_args()
options.degree = int(options.degree)
options.spatialDimension = int(options.spatialDimension)
options.model = int(options.model)
options.epoch = int(options.epoch)
options.epochchunk = int(options.epochchunk)
options.batch = int(options.batch)
options.verbosity = int(options.verbosity)
options.loadmodel = int(options.loadmodel)
options.training = int(options.training)
options.processingmode = int(options.processingmode)
options.normalized = int(options.normalized)
options.networkwidth = int(options.networkwidth)
options.networkheight = int(options.networkheight)
# --- End Option Parsing ---
    # Switch to CPU mode, if wished
    if options.processingmode == 0:
        # Set CPU as the only visible physical device
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
if tf.test.gpu_device_name():
print('GPU found. Using GPU')
else:
print("Disabled GPU. Using CPU")
# --- initialize model
print("Initialize model")
initModel(modelNumber=options.model, polyDegree=options.degree, spatialDim=options.spatialDimension,
folderName=options.folder,
optimizer=options.optimizer, width=options.networkwidth, height=options.networkheight)
neuralClosureModel.model.summary()
# Print chosen options to file
d = {'degree': [options.degree],
'spatial Dimension': [options.spatialDimension],
'model': [options.model],
'epoch': [options.epoch],
'epochChunk': [options.epochchunk],
'batchsize': [options.batch],
'verbosity': [options.verbosity],
'loadmodel': [options.loadmodel],
'training': [options.training],
'folder': [options.folder],
'optimizer': [options.optimizer],
'processingmode': [options.processingmode],
'normalized moments': [options.normalized]}
df = pd.DataFrame(data=d)
count = 0
cfgFile = neuralClosureModel.filename + '/config_001_'
while os.path.isfile(cfgFile + '.csv'):
count += 1
cfgFile = neuralClosureModel.filename + '/config_' + str(count).zfill(3) + '_'
cfgFile = cfgFile + '.csv'
print("Writing config to " + cfgFile)
df.to_csv(cfgFile, index=False)
if (options.loadmodel == 1 or options.training == 0):
# in execution mode the model must be loaded.
# load model weights
neuralClosureModel.loadModel()
else:
print("Start training with new weights")
if (options.training == 1):
# create training Data
neuralClosureModel.loadTrainingData(normalizedMoments=options.normalized)
# train model
neuralClosureModel.trainModel(valSplit=0.01, epochCount=options.epoch, epochChunks=options.epochchunk,
batchSize=options.batch, verbosity=options.verbosity,
processingMode=options.processingmode)
# save model
neuralClosureModel.saveModel()
# --- in execution mode, callNetwork or callNetworkBatchwise get called from c++ directly ---
return 0
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
6653908
|
<reponame>TG-Techie/tg-gui
from __future__ import annotations
from typing import TYPE_CHECKING, Generic, TypeVar, Protocol
from tg_gui_core import *
from tg_gui_core.shared import Missing, MissingType, as_any
if TYPE_CHECKING:
from typing import (
Callable,
ClassVar,
Any,
overload,
Literal,
Type,
TypeGuard,
TypeAlias,
)
from typing_extensions import Self
_C = TypeVar("_C", bound="Callable")
_OnupdateCallback = Callable[["_T"], None]
_OnupdateMthd = Callable[[Widget, "_T"], None]
Stateful: TypeAlias = "State[_T] | _T"
# ---
_T = TypeVar("_T")
_Widget = Widget
class ProxyProvider(Protocol[_T]):
def get_proxy(self, owner: Widget) -> Proxy[_T]:
...
class Proxy(Protocol[_T]):
def value(self, *, reader: Identifiable) -> _T:
...
def update(self, value: _T, *, writer: Identifiable) -> None:
...
def subscribe(
self,
*,
subscriber: Identifiable,
onupdate: Callable[[_T], None],
) -> Proxy[_T]:
...
def unsubscribe(self, *, subscriber: Identifiable) -> bool:
...
def isstate(__obj: State[_T] | _T) -> TypeGuard[State[_T]]:
return isinstance(__obj, State)
class State(Generic[_T]):
"""
These wrap a value to update widgets as the value changes.
State objects are usually auto-generated by the stateful descriptor but can be used as their own descriptors.
"""
_value: _T
_subscribed: dict[UID, _OnupdateCallback[_T]]
def get_proxy(self, owner: Widget) -> Proxy[_T]:
return self
def value(self, *, reader: Identifiable) -> _T:
return self._value
def update(self, value: _T, *, writer: Identifiable) -> None:
if value == self._value:
return
self._value = value
#
for uid, onupdate in self._subscribed.items():
if uid == writer.id:
continue
onupdate(value)
def subscribe(
self,
*,
subscriber: Identifiable,
onupdate: _OnupdateCallback[_T],
) -> Self:
if subscriber.id in self._subscribed:
raise ValueError(f"{subscriber} is already subscribed to {self}")
self._subscribed[subscriber.id] = onupdate
return self
def unsubscribe(self, *, subscriber: Identifiable) -> bool:
return self._subscribed.pop(subscriber.id, None) is not None
if TYPE_CHECKING:
@overload
def __get__(self, instance: _Widget, owner: Type[_Widget]) -> _T:
...
@overload
def __get__(self, instance: None, owner: Type[_Widget]) -> Self:
...
def __get__(self, instance, owner):
if instance is None:
return self
else:
return self.value(reader=instance)
def __set__(self, instance: _Widget, value: _T) -> None:
self.update(value, writer=instance)
def __bool__(self) -> bool:
raise TypeError("Cannot use a state as a boolean")
if TYPE_CHECKING:
def __new__(cls: type[Self], value: _T) -> _T:
...
else:
def __init__(self, value: _T) -> None:
# TODO: allow write locking based on id
self._value = value
self._subscribed: dict[UID, _OnupdateCallback[_T]] = {}
_T = TypeVar("_T")
class StatefulAttr(WidgetAttr[_T]):
_onupdate: _OnupdateMthd[_T] | None
# TODO: add get_attr, set_attr, and init_attr methods
def init_attr(self, owner: _Widget, value: _T | State[_T] | MissingType) -> None:
setattr(owner, self.private_name, value)
if value is not Missing and isstate(value):
self._subscribe_to_state(owner, value)
def del_attr(self, owner: _Widget) -> None:
# unsubscribe from the old state if it is a state
existing = self.get_raw_attr(owner)
if isstate(existing):
existing.unsubscribe(subscriber=owner)
def get_raw_attr(self, widget: _Widget) -> _T | State[_T]:
"""
returns the unsugared instance attribute value. This may be a raw value or a State instance that wraps that value.
"""
return getattr(widget, self.private_name)
def get_attr(self, owner: _Widget) -> _T:
attr: _T | State[_T] = self.get_raw_attr(owner)
if isstate(attr):
value = attr.value(reader=owner)
else:
assert not isinstance(attr, State)
value = attr
return value
def get_proxy(self, owner: _Widget) -> State[_T]:
existing = self.get_raw_attr(owner)
if isstate(existing):
return existing
else:
assert not isinstance(existing, State)
# auto-generate a state, set it and re-turn it
value: State[_T] = as_any(State(existing))
self._subscribe_to_state(owner, value)
setattr(owner, self.private_name, value)
return value
def set_onupdate(self, onupdate: _OnupdateMthd[_T]) -> _OnupdateMthd[_T]:
# make sure one is not already set
if self._onupdate is not None:
raise ValueError(f"onupdate is already proided for {self}.")
else:
# TODO: fix this?
self._onupdate = onupdate
return onupdate
def _subscribe_to_state(self, owner: _Widget, state: State[_T]) -> None:
if self._onupdate is not None:
state.subscribe(
subscriber=owner,
onupdate=getattr(owner, self._onupdate.__name__),
)
# NOTE: this uses PEP 681, which is yet to be approved.
if TYPE_CHECKING:
# see tg_gui_core.attrs.py for what this is overriding.
@overload
def __new__(
cls,
default: State[_T] | _T,
onupdate: _OnupdateCallback | None = None,
*,
init: Literal[True] = True,
) -> Any:
...
@overload
def __new__(
cls,
*,
factory: Callable[[], _T],
onupdate: _OnupdateCallback | None = None,
init: Literal[True] = True,
) -> Any:
...
@overload
def __new__(
cls,
*,
init: Literal[True] = True,
onupdate: _OnupdateCallback | None = None,
kw_only: bool = True,
) -> Any:
...
def __new__(cls, *_, **__) -> Any:
...
else:
pass
# see below for the init implementation
def __widattr_init__(
self: StatefulAttr[_T],
default: _T | State[_T] | MissingType = Missing,
# onupdate: _OnupdateCallback[_T] | None = None,
*,
factory: Callable[[], _T] | MissingType = Missing,
init: Literal[True] = True,
kw_only: bool | MissingType = Missing,
) -> None:
assert init is True, "init must be True"
if default is not Missing:
assert (
factory is Missing
), f"{self.__class__.__name__}(...) got arguments for 'default' and 'factory', only one is allowed."
assert (
kw_only is Missing
), f"{self.__class__.__name__}(...) got arguments for 'default' and 'kw_only', only one is allowed."
super(StatefulAttr, self).__widattr_init__(default=default, init=True) # type: ignore
elif factory is not Missing:
assert (
kw_only is Missing
), f"{self.__class__.__name__}(...) got arguments for 'factory' and 'kw_only', only one is allowed."
super(StatefulAttr, self).__widattr_init__(default_factory=factory, init=True) # type: ignore
else:
super(StatefulAttr, self).__widattr_init__(init=True, kw_only=kw_only or True) # type: ignore
self._onupdate = None
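# --- Usage sketch (illustrative, not part of the library) ---
# A minimal demonstration of State's subscribe/update flow outside a widget.
# `FakeReader` is a hypothetical stand-in for anything Identifiable-like; the
# only thing State relies on at runtime is the `.id` attribute.
if __name__ == "__main__":

    class FakeReader:
        def __init__(self, uid: int) -> None:
            self.id = uid

    reader = FakeReader(1)
    writer = FakeReader(2)

    count = State(0)  # at runtime State(0) returns a State instance
    count.subscribe(subscriber=reader, onupdate=lambda v: print("count is now", v))

    count.update(5, writer=writer)      # reader's callback fires: "count is now 5"
    count.update(5, writer=writer)      # unchanged value -> no notification
    print(count.value(reader=reader))   # 5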
|
StarcoderdataPython
|
1866825
|
<reponame>Symantec/ccs-api-samples
# Script to Search the asset group in the CCS system
# For more details Refer the CCS REST API document at : https://apidocs.symantec.com/home/CCS
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
# Variable
# Replace the <hostname> with CCS application server host name
# Replace the <port number> with the configured port number for REST API, Default Port Number : 12431
# Replace the <user name> and <password> with valid CCS user name and password for example: UserName = domain1\\administrator, password = <PASSWORD>
HostName = '<hostname>'
PortNumber = '<port number>'
UserName = '<user name>'
Password = '<password>'
# Function to generate CCS REST API access token
def getToken():
urlToken = "https://" + HostName + ":" + PortNumber + "/ccs/api/v1/oauth/tokens"
payload = "grant_type=password&username=" + UserName + "&password=" + Password +""
headers = {'Content-Type': "application/json"}
responseToken = requests.request("POST", urlToken, data=payload, headers=headers, verify=False)
authenticationResult = responseToken.status_code
if (authenticationResult != 200):
print("\nToken generation failed. Please check that the REST API is enabled and that the user name and password are correct.\n")
exit()
tokenDict = responseToken.json()
token = tokenDict['access_token']
refreshToken = tokenDict['refresh_token']
print("bearer Token is:\n")
print(token)
print("\n Refresh Token is:\n")
print(refreshToken)
return token
# CCS asset group URI
url = "https://" + HostName + ":" + PortNumber + "/ccs/api/v1/AssetGroup"
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
bearertoken = "Bearer " + getToken()
headers = {
'Authorization': bearertoken
}
# Simple Search
querystring = {"Attributes":"(displayName = All DB2 *)"}
# Advanced Search
#querystring = {"Attributes":"(displayName = *)","ContainerPath":"Asset system","SearchSubTree":"True"}
response = requests.request("GET", url, headers=headers, params=querystring, verify=False)
GroupData = response.json()
print(response.text)
|
StarcoderdataPython
|
1754028
|
<filename>252_meeting_rooms.py
#
# 252. Meeting Rooms
#
# Q: https://leetcode.com/problems/meeting-rooms/
# A: https://leetcode.com/problems/meeting-rooms/discuss/919342/Kt-Js-Py3-Cpp-Sort-%2B-Scan
#
from typing import List
class Solution:
def canAttendMeetings(self, A: List[List[int]], last = 0) -> bool:
for i, j in sorted(A):
if not (last <= i):
return False
last = j
return True
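# --- Quick check (illustrative, not part of the original solution) ---
if __name__ == "__main__":
    s = Solution()
    print(s.canAttendMeetings([[0, 30], [5, 10], [15, 20]]))  # False: [0, 30] overlaps [5, 10]
    print(s.canAttendMeetings([[7, 10], [2, 4]]))             # True: intervals are disjoint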
|
StarcoderdataPython
|
3200312
|
<reponame>mickypaganini/everware<filename>everware/home_handler.py
from tornado import web, gen
from docker.errors import NotFound
from jupyterhub.handlers.base import BaseHandler
from IPython.html.utils import url_path_join
from tornado.httputil import url_concat
from tornado.httpclient import HTTPRequest, AsyncHTTPClient
import json
import re
@gen.coroutine
def _fork_github_repo(url, token):
http_client = AsyncHTTPClient()
headers={"User-Agent": "JupyterHub",
"Authorization": "token {}".format(token)
}
result = re.findall('^https://github.com/([^/]+)/([^/]+).*', url)
if not result:
raise ValueError('URL is not a github URL')
owner, repo = result[0]
api_url = "https://api.github.com/repos/%s/%s/forks" % (owner, repo)
req = HTTPRequest(api_url,
method="POST",
headers=headers,
body='',
)
resp = yield http_client.fetch(req)
return json.loads(resp.body.decode('utf8', 'replace'))
@gen.coroutine
def _github_fork_exists(username, url, token):
http_client = AsyncHTTPClient()
headers={"User-Agent": "JupyterHub",
"Authorization": "token {}".format(token)
}
result = re.findall('^https://github.com/([^/]+)/([^/]+).*', url)
if not result:
raise ValueError('URL (%s) is not a github URL' % url)
owner, repo = result[0]
api_url = "https://api.github.com/repos/%s/%s" % (username, repo)
req = HTTPRequest(api_url,
method="GET",
headers=headers,
)
try:
resp = yield http_client.fetch(req)
return True
except Exception:
return False
@gen.coroutine
def _repository_changed(user):
try:
setup = yield user.spawner.docker(
'exec_create',
container=user.spawner.container_id,
cmd="bash -c 'cd $JPY_WORKDIR && \
(git fetch --unshallow > /dev/null 2>&1; true) && \
git diff --name-only'",
)
out = yield user.spawner.docker(
'exec_start',
exec_id=setup['Id'],
)
except NotFound:
return False
if out:
return True
else:
return False
@gen.coroutine
def _push_github_repo(user, url, commit_sha, branch_name, token):
result = re.findall('^https://github.com/([^/]+)/([^/]+).*', url)
if not result:
raise ValueError('URL is not a github URL')
owner, repo = result[0]
fork_url = "https://{}@github.com/{}/{}".format(token, user.name, repo)
out = yield user.spawner.docker(
'exec_create',
container=user.spawner.container_id,
cmd="bash -c 'cd $JPY_WORKDIR && \
git config --global user.email \"<EMAIL>\" && \
git config --global user.name \"Everware\" && \
(git fetch --unshallow; true) && \
git add . && \
git commit -m \"Update through everware\" && \
(git remote add everware-fork {fork_url}; true) && \
git push -f everware-fork {branch_name}'".format(
fork_url=fork_url,
commit_sha=commit_sha,
branch_name=branch_name
),
)
response = yield user.spawner.docker(
'exec_start',
exec_id=out['Id'],
)
class HomeHandler(BaseHandler):
"""Render the user's home page."""
@web.authenticated
@gen.coroutine
def get(self):
user = self.get_current_user()
repourl = self.get_argument('repourl', '')
do_fork = self.get_argument('do_fork', False)
do_push = self.get_argument('do_push', False)
if repourl:
self.log.info('Got %s in home' % repourl)
self.redirect(url_concat(
url_path_join(self.hub.server.base_url, 'spawn'), {
'repourl': repourl
}
))
return
branch_name = commit_sha = None
repo_url = ''
fork_exists = False
repository_changed = False
if user.running and hasattr(user, 'login_service'):
branch_name = user.spawner.branch_name
commit_sha = user.spawner.commit_sha
if user.login_service == "github":
if do_fork:
self.log.info('Will fork %s' % user.spawner.repo_url)
yield _fork_github_repo(
user.spawner.repo_url,
user.token,
)
self.redirect('/hub/home')
return
if do_push:
self.log.info('Will push to fork')
yield _push_github_repo(
user,
user.spawner.repo_url,
commit_sha,
branch_name,
user.token,
)
self.redirect('/hub/home')
return
repo_url = user.spawner.repo_url
fork_exists = yield _github_fork_exists(
user.name,
user.spawner.repo_url,
user.token,
)
repository_changed = yield _repository_changed(user)
if hasattr(user, 'login_service'):
loginservice = user.login_service
else:
loginservice = 'none'
html = self.render_template('home.html',
user=user,
repourl=repo_url,
login_service=loginservice,
fork_exists=fork_exists,
repository_changed=repository_changed,
branch_name=branch_name,
commit_sha=commit_sha
)
self.finish(html)
|
StarcoderdataPython
|
1762666
|
<gh_stars>0
''' Bool - A mutable bool class
##################################################
###### AUTOGENERATED - DO NOT EDIT DIRECTLY ######
##################################################
'''
from mutable_primitives.base import Mutable
class Bool(Mutable):
''' Bool - A mutable bool class '''
base = bool
def __init__(self, val):
super(Bool, self).__init__(val, self.base) #pylint: disable=super-with-arguments
self.val = val
def get(self):
''' get raw (primitive) value '''
return self.val
def set(self, val):
''' set raw (primitive) value '''
assert isinstance(val, self.base)
self.val = val
def __eq__(self, other):
return self.val == other
def __ne__(self, other):
return self.val != other
def __str__(self):
return '{}({})'.format(self.__class__.__name__, self.val)
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, self.val)
def __bool__(self):
''' boolean test for python3 '''
if self.val:
return True
return False
def __nonzero__(self):
''' boolean test for python2 '''
if self.val:
return True
return False
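# --- Usage sketch (assumes the mutable_primitives package is importable) ---
# Bool compares and truth-tests like a primitive bool, but can be mutated in
# place and therefore shared by reference.
if __name__ == "__main__":
    flag = Bool(False)
    if not flag:
        flag.set(True)        # mutate in place
    print(flag)               # Bool(True)
    print(flag == True)       # True (demonstrates __eq__)
    print(flag.get())         # True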
|
StarcoderdataPython
|
298714
|
<reponame>kipkurui/gimmemotifs
# Copyright (c) 2009-2010 <NAME> <<EMAIL>>
#
# This module is free software. You can redistribute it and/or modify it under
# the terms of the MIT License, see the file COPYING included with this
# distribution.
""" Odds and ends that for which I didn't (yet) find another place """
# Python imports
import os
import re
import sys
import random
import tempfile
from math import log
from string import strip
from subprocess import Popen, PIPE
# External imports
import numpy
from scipy import special
from gimmemotifs import tools
from gimmemotifs.fasta import *
lgam = special.gammaln
def run_command(cmd):
p = Popen(cmd, shell=True)
p.communicate()
def star(stat, categories):
stars = 0
for c in sorted(categories):
if stat >= c:
stars += 1
else:
return stars
return stars
def phyper_single(k, good, bad, N):
return numpy.exp(lgam(good+1) - lgam(good-k+1) - lgam(k+1) + lgam(bad+1) - lgam(bad-N+k+1) - lgam(N-k+1) - lgam(bad+good+1) + lgam(bad+good-N+1) + lgam(N+1))
def phyper(k, good, bad, N):
""" Current hypergeometric implementation in scipy is broken, so here's the correct version """
pvalues = [phyper_single(x, good, bad, N) for x in range(k + 1, N + 1)]
return numpy.sum(pvalues)
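# Worked note: phyper(k, good, bad, N) sums phyper_single(x, ...) for
# x = k+1 .. N, i.e. it is the upper tail P(X > k) of a hypergeometric
# distribution with `good` successes and `bad` failures in the population
# and N draws. phyper_single itself is
#   C(good, x) * C(bad, N - x) / C(good + bad, N)
# evaluated in log-space via gammaln to avoid overflow for large counts.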
def divide_file(file, sample, rest, fraction, abs_max):
lines = open(file).readlines()
#random.seed()
random.shuffle(lines)
x = int(fraction * len(lines))
if x > abs_max:
x = abs_max
tmp = tempfile.NamedTemporaryFile()
# Fraction as sample
for line in lines[:x]:
tmp.write(line)
tmp.flush()
# Make sure it is sorted for tools that use this information (MDmodule)
stdout, stderr = Popen("sort -k4gr %s > %s" % (tmp.name, sample), shell=True, stderr=PIPE).communicate()
tmp.close()
if stderr:
print "Something went wrong: %s" % stderr
sys.exit()
# Rest
f = open(rest, "w")
for line in lines[x:]:
f.write(line)
f.close()
#if os.path.exists(tmp.name):
# os.unlink(tmp.name)
return x, len(lines[x:])
def divide_fa_file(file, sample, rest, fraction, abs_max):
fa = Fasta(file)
ids = fa.ids[:]
x = int(fraction * len(ids))
if x > abs_max:
x = abs_max
sample_seqs = random.sample(ids, x)
# Rest
f_sample = open(sample, "w")
f_rest = open(rest, "w")
for id,seq in fa.items():
if id in sample_seqs:
f_sample.write(">%s\n%s\n" % (id, seq))
else:
f_rest.write(">%s\n%s\n" % (id, seq))
f_sample.close()
f_rest.close()
return x, len(ids[x:])
def make_gff_histogram(gfffile, outfile, l, title, breaks=21):
try:
import matplotlib.pyplot as plt
except:
pass
data = []
for line in open(gfffile):
vals = line.strip().split("\t")
data.append((int(vals[3]) + int(vals[4])) / 2)
plt.hist(data, breaks)
plt.title(title)
plt.savefig(outfile)
def ks_pvalue(values, l):
from scipy.stats import kstest
from numpy import array
if len(values) == 0:
return 1.0
a = array(values, dtype="float") / l
return kstest(a, "uniform")[1]
def write_equalwidth_bedfile(bedfile, width, outfile):
"""Read input from <bedfile>, set the width of all entries to <width> and
write the result to <outfile>.
Input file needs to be in BED or WIG format."""
BUFSIZE = 10000
f = open(bedfile)
out = open(outfile, "w")
lines = f.readlines(BUFSIZE)
line_count = 0
while lines:
for line in lines:
line_count += 1
if not line.startswith("#") and not line.startswith("track") and not line.startswith("browser"):
vals = line.strip().split("\t")
try:
start, end = int(vals[1]), int(vals[2])
except:
print "Error on line %s while reading %s. Is the file in BED or WIG format?" % (line_count, bedfile)
sys.exit(1)
start = (start + end) / 2 - (width / 2)
# This shifts the center, but ensures the width is identical... maybe not ideal
if start < 0:
start = 0
end = start + width
# Keep all the other information in the bedfile if it's there
if len(vals) > 3:
out.write("%s\t%s\t%s\t%s\n" % (vals[0], start, end, "\t".join(vals[3:])))
else:
out.write("%s\t%s\t%s\n" % (vals[0], start, end))
lines = f.readlines(BUFSIZE)
out.close()
f.close()
def get_significant_motifs(motifs, fg_fasta, bg_fasta, e_cutoff=None, p_cutoff=None, save_result=None):
pass
class MotifMatch:
def __init__(self, seq, name, instance, start, end, strand, score):
self.sequence = seq
self.motif_name = name
self.motif_instance = instance
self.start = start
self.end = end
self.strand = strand
self.score = score
class MotifResult:
def __init__(self):
self.raw_output = ""
self.datetime = ""
self.command = ""
self.fastafile = ""
self.params = {}
self.program = ""
self.feature = ""
self.sequences = {}
self.motifs = {}
self.matches = {}
def to_gff(self, gb_format=False):
p = re.compile(r'([\w_]+):(\d+)-(\d+)')
gff_output = ""
for seq, dict in self.matches.items():
for motif, mms in dict.items():
for mm in mms:
print_seq = seq
(start, end) = (mm.start, mm.end)
if gb_format:
m = p.match(seq)
if m:
print_seq = m.group(1)
start = int(start) + int(m.group(2)) - 1
end = int(end) + int(m.group(2)) - 1
gff_output += "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (print_seq,
self.program,
self.feature,
start,
end,
mm.score,
mm.strand,
".",
"motif_name \"%s\" ; motif_instance \"%s\"" % (mm.motif_name, mm.motif_instance))
return gff_output[:-1]
def seqn(self):
return len(self.sequences.keys())
def parse_gff(gff_file, lowmem=False):
mr = MotifResult()
total = 0
f = open(gff_file)
BUFSIZE = 10000000
while 1:
lines = f.readlines(BUFSIZE)
if not lines:
break
for line in lines:
vals = line.strip().split("\t")
if len(vals) == 9:
(seq, program, feature, start, end, score, strand, bla, extra) = vals
(motif_name, motif_instance) = map(strip, extra.split(";"))
motif_name = motif_name.split(" ")[1][1:-1]
motif_instance = motif_instance.split(" ")[1][1:-1]
mr.sequences[seq] = 1
if not(mr.motifs.has_key(motif_name)):
mr.motifs[motif_name] = {}
if not(mr.motifs[motif_name].has_key(seq)):
mr.motifs[motif_name][seq] = 0
mr.motifs[motif_name][seq] += 1
else:
sys.stderr.write("Error parsing line in %s\n%s\n" % (gff_file, line))
total += len(lines)
return mr
def scan_fasta_file_with_motifs(fastafile, motiffile, threshold, gfffile, scan_rc=True, nreport=1):
error = None
try:
from gimmemotifs.fasta import Fasta
from gimmemotifs.motif import pwmfile_to_motifs
motifs = pwmfile_to_motifs(motiffile)
fa = Fasta(fastafile)
for motif in motifs:
motif.pwm_scan_to_gff(fa, gfffile, nreport=nreport, cutoff=float(threshold), scan_rc=scan_rc, append=True)
except Exception,e :
error = e
return error
def calc_motif_enrichment(sample, background, mtc=None, len_sample=None, len_back=None):
"""Calculate enrichment based on hypergeometric distribution"""
INF = "Inf"
# Local imports to enable parallel Python calls
from scipy.stats import hypergeom
if mtc not in [None, "Bonferroni", "Benjamini-Hochberg", "None"]:
raise RuntimeError, "Unknown correction: %s" % mtc
sig = {}
p_value = {}
n_sample = {}
n_back = {}
if not(len_sample):
len_sample = sample.seqn()
if not(len_back):
len_back = background.seqn()
for motif in sample.motifs.keys():
p = "NA"
s = "NA"
q = len(sample.motifs[motif])
m = 0
if(background.motifs.get(motif)):
m = len(background.motifs[motif])
n = len_back - m
k = len_sample
p = phyper(q - 1, m, n, k)
if p != 0:
s = -(log(p)/log(10))
else:
s = INF
else:
s = INF
p = 0.0
sig[motif] = s
p_value[motif] = p
n_sample[motif] = q
n_back[motif] = m
if mtc == "Bonferroni":
for motif in p_value.keys():
if p_value[motif] != "NA":
p_value[motif] = p_value[motif] * len(p_value.keys())
if p_value[motif] > 1:
p_value[motif] = 1
elif mtc == "Benjamini-Hochberg":
motifs = p_value.keys()
motifs.sort(cmp=lambda x,y: -cmp(p_value[x],p_value[y]))
l = len(p_value)
c = l
for m in motifs:
if p_value[motif] != "NA":
p_value[m] = p_value[m] * l / c
c -= 1
return (sig, p_value, n_sample, n_back)
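# Worked note: for a motif found in q sample sequences and m background
# sequences, p = phyper(q - 1, m, len_back - m, len_sample) is the
# hypergeometric probability of observing q or more hits in a sample of
# len_sample sequences drawn from the background, i.e. a one-sided
# enrichment p-value; sig is simply -log10(p).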
def calc_enrichment(sample, background, len_sample, len_back, mtc=None):
"""Calculate enrichment based on hypergeometric distribution"""
INF = "Inf"
# Local imports to enable parallel Python calls
from scipy.stats import hypergeom
if mtc not in [None, "Bonferroni", "Benjamini-Hochberg", "None"]:
raise RuntimeError, "Unknown correction: %s" % mtc
sig = {}
p_value = {}
n_sample = {}
n_back = {}
for motif in sample.keys():
p = "NA"
s = "NA"
q = sample[motif]
m = 0
if(background[motif]):
m = background[motif]
n = len_back - m
k = len_sample
p = phyper(q - 1, m, n, k)
if p != 0:
s = -(log(p)/log(10))
else:
s = INF
else:
s = INF
p = 0.0
sig[motif] = s
p_value[motif] = p
n_sample[motif] = q
n_back[motif] = m
if mtc == "Bonferroni":
for motif in p_value.keys():
if p_value[motif] != "NA":
p_value[motif] = p_value[motif] * len(p_value.keys())
if p_value[motif] > 1:
p_value[motif] = 1
elif mtc == "Benjamini-Hochberg":
motifs = p_value.keys()
motifs.sort(cmp=lambda x,y: -cmp(p_value[x],p_value[y]))
l = len(p_value)
c = l
for m in motifs:
if p_value[motif] != "NA":
p_value[m] = p_value[m] * l / c
c -= 1
return (sig, p_value, n_sample, n_back)
def gff_enrichment(sample, background, numsample, numbackground, outfile):
data_sample = parse_gff(sample)
data_bg = parse_gff(background)
(e,p,ns,nb) = calc_motif_enrichment(data_sample, data_bg, "Benjamini-Hochberg", numsample, numbackground)
out = open(outfile, "w")
out.write("Motif\tSig\tp-value\t# sample\t# background\tEnrichment\n")
for m in e.keys():
if nb[m] > 0:
enrich = (ns[m] / float(numsample)) / (nb[m] / float(numbackground))
out.write("%s\t%s\t%s\t%s\t%s\t%0.3f\n" % (m, e[m], p[m], ns[m], nb[m], enrich))
else:
out.write("%s\t%s\t%s\t%s\t%s\tInf\n" % (m, e[m], p[m], ns[m], nb[m]))
out.close()
def is_valid_bedfile(bedfile, columns=6):
f = open(bedfile)
for i, line in enumerate(f.readlines()):
if not (line.startswith("browser") or line.startswith("track")):
vals = line.split("\t")
# Gene file should be at least X columns
if len(vals) < columns:
sys.stderr.write("Error in line %s: we need at least %s columns!\n" % (i, columns))
return False
# Check coordinates
try:
int(vals[1]), int(vals[2])
except ValueError:
sys.stderr.write("Error in line %s: coordinates in column 2 and 3 need to be integers!\n" % (i))
return False
if columns >= 6:
# We need the strand
if vals[5] not in ["+", "-"]:
sys.stderr.write("Error in line %s: column 6 (strand information) needs to be + or -" % (i))
return False
f.close()
return True
def median_bed_len(bedfile):
f = open(bedfile)
l = []
for i, line in enumerate(f.readlines()):
if not (line.startswith("browser") or line.startswith("track")):
vals = line.split("\t")
try:
l.append(int(vals[2]) - int(vals[1]))
except:
sys.stderr.write("Error in line %s: coordinates in column 2 and 3 need to be integers!\n" % (i))
sys.exit(1)
f.close()
return numpy.median(l)
def locate_tool(tool, verbose=True):
tool = re.sub(r'[^a-zA-Z]','',tool)
m = eval("tools." + tool)()
bin = which(m.cmd)
if bin:
print "Found %s in %s" % (m.name, bin)
return bin
else:
print "Couldn't find %s" % m.name
def motif_localization(fastafile, motif, width, outfile, cutoff=0.9):
NR_HIST_MATCHES = 100
from gimmemotifs.plot import plot_histogram
from gimmemotifs.utils import ks_pvalue
from gimmemotifs.fasta import Fasta
from numpy import array
matches = motif.pwm_scan(Fasta(fastafile), cutoff=cutoff, nreport=NR_HIST_MATCHES)
if len(matches) > 0:
ar = []
for a in matches.values():
ar += a
matches = array(ar)
p = ks_pvalue(matches, width - len(motif))
plot_histogram(matches - width / 2 + len(motif) / 2, outfile, xrange=(-width / 2, width / 2), breaks=21, title="%s (p=%0.2e)" % (motif.id, p), xlabel="Position")
return motif.id, p
else:
return motif.id, 1.0
def parse_cutoff(motifs, cutoff, default=0.9):
""" Provide either a file with one cutoff per motif or a single cutoff
returns a hash with motif id as key and cutoff as value
"""
cutoffs = {}
if os.path.isfile(str(cutoff)):
for i,line in enumerate(open(cutoff)):
if line != "Motif\tScore\tCutoff\n":
try:
motif,v,c = line.strip().split("\t")
c = float(c)
cutoffs[motif] = c
except Exception as e:
sys.stderr.write("Error parsing cutoff file, line {0}: {1}\n".format(e, i + 1))
sys.exit(1)
else:
for motif in motifs:
cutoffs[motif.id] = float(cutoff)
for motif in motifs:
if not cutoffs.has_key(motif.id):
sys.stderr.write("No cutoff found for {0}, using default {1}\n".format(motif.id, default))
cutoffs[motif.id] = default
return cutoffs
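# Example cutoff file accepted by parse_cutoff (tab-separated; the header row
# is skipped if present). The motif names below are made up for illustration:
#
#   Motif   Score   Cutoff
#   motif_1 8.21    0.91
#   motif_2 6.05    0.85
#
# parse_cutoff(motifs, "cutoffs.txt") would map each motif id to the Cutoff
# column; any motif missing from the file falls back to `default`.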
def _treesort(order, nodeorder, nodecounts, tree):
# From the Pycluster library, <NAME>
# Find the order of the nodes consistent with the hierarchical clustering
# tree, taking into account the preferred order of nodes.
nNodes = len(tree)
nElements = nNodes + 1
neworder = numpy.zeros(nElements)
clusterids = numpy.arange(nElements)
for i in range(nNodes):
i1 = tree[i].left
i2 = tree[i].right
if i1 < 0:
order1 = nodeorder[-i1-1]
count1 = nodecounts[-i1-1]
else:
order1 = order[i1]
count1 = 1
if i2 < 0:
order2 = nodeorder[-i2-1]
count2 = nodecounts[-i2-1]
else:
order2 = order[i2]
count2 = 1
# If order1 and order2 are equal, their order is determined
# by the order in which they were clustered
if i1 < i2:
if order1 < order2:
increase = count1
else:
increase = count2
for j in range(nElements):
clusterid = clusterids[j]
if clusterid == i1 and order1 >= order2:
neworder[j] += increase
if clusterid == i2 and order1 < order2:
neworder[j] += increase
if clusterid == i1 or clusterid == i2:
clusterids[j] = -i-1
else:
if order1 <= order2:
increase = count1
else:
increase = count2
for j in range(nElements):
clusterid = clusterids[j]
if clusterid == i1 and order1 > order2:
neworder[j] += increase
if clusterid == i2 and order1 <= order2:
neworder[j] += increase
if clusterid == i1 or clusterid == i2:
clusterids[j] = -i-1
return numpy.argsort(neworder)
def sort_tree(tree, order):
# Adapted from the Pycluster library, <NAME>
nnodes = len(tree)
nodeindex = 0
nodecounts = numpy.zeros(nnodes, int)
nodeorder = numpy.zeros(nnodes)
nodedist = numpy.array([node.distance for node in tree])
for nodeindex in range(nnodes):
min1 = tree[nodeindex].left
min2 = tree[nodeindex].right
if min1 < 0:
index1 = -min1-1
order1 = nodeorder[index1]
counts1 = nodecounts[index1]
nodedist[nodeindex] = max(nodedist[nodeindex],nodedist[index1])
else:
order1 = order[min1]
counts1 = 1
if min2 < 0:
index2 = -min2-1
order2 = nodeorder[index2]
counts2 = nodecounts[index2]
nodedist[nodeindex] = max(nodedist[nodeindex],nodedist[index2])
else:
order2 = order[min2]
counts2 = 1
counts = counts1 + counts2
nodecounts[nodeindex] = counts
nodeorder[nodeindex] = (counts1*order1+counts2*order2) / counts
# Now set up order based on the tree structure
index = _treesort(order, nodeorder, nodecounts, tree)
return index
|
StarcoderdataPython
|
46253
|
import dash
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import dash_table
import plotly.express as px
import pandas as pd
import requests
from bs4 import BeautifulSoup
import re
from newspaper import Article
import sys
module_path = './question_answering'
if module_path not in sys.path:
sys.path.append(module_path)
from question_answering_inference_utility import question_answering
# ------------------------------ LAYOUT CONTAINERS ------------------------------
# The main page will be divided in two columns.
# We'll define them right away.
# 1st column:
col_1 = html.Div(
className='column-left',
children=[
html.Div([
# Where the content of the tabs will be showed:
html.B(
children='The article must be in English.'
),
dcc.Input(
id="input_url",
type="url",
placeholder="Source URL",
required=True,
style=dict(
width='100%'
)
)
]),
# Radiobuttons for choosing parser:
html.Div([
html.Div('Parser:'),
dcc.RadioItems(
id='radiobuttons-parser',
options=[
{'label': "lxml: newspaper3k's default parser", 'value': 'lxml'},
{'label': "html.parser: Python's default parser",
'value': 'html.parser'},
{'label': 'html5lib: Extremely lenient but quite slow',
'value': 'html5lib'},
],
value='lxml'
)
]),
# Button to extract text from the give url:
html.Button(
id='extract-button-state',
n_clicks=0,
children='Extract',
style=dict(
width='100%'
)
),
# Div to debug:
html.Div(id='output-state-extract')
]
)
# 2nd column:
col_2 = html.Div(
className='column-right',
children=[
# Text area for the extracted text from the URL:
html.Div('Extracted text:'),
dcc.Textarea(
id='textarea-processed-url',
className='extracted-text',
value='Textarea content initialized\nwith multiple lines of text',
persistence=True,
readOnly=True,
)]
)
# Input question container:
input_question_container = html.Div(
children=[
html.Div(
style={'margin-bottom': '10px'},
children=[
html.Div('Question:'),
dcc.Input(
id="input-question",
type='text',
placeholder="Please enter your question.",
style=dict(
width='100%',
)
)
]
),
# Checkbutton for filtering numbers:
dcc.Checklist(
id='checkbutton',
options=[
{'label': 'Filter numbers', 'value': '1'},
],
value=['1']
),
]
)
# Submit button container:
submit_button_container = html.Div(
children=[
html.Div(
className='center',
children=[
html.Button(
id='submit-button-state',
className='submit-button',
n_clicks=0,
children='Submit')
],
style=dict(
margin='20px'
)
),
# Div to debug:
html.Div(id='output-state-submit')
]
)
# ------------------------------ FINAL LAYOUT ------------------------------
layout = html.Div(
className='main',
children=[
html.H1(
children='T5 Text Extraction Demo',
style={'textAlign': 'center'}
),
html.Div(
className="row",
children=[
col_1,
col_2
]
),
# Horizontal separator:
html.Hr(),
input_question_container,
submit_button_container
]
)
# ------------------------------ CALLBACKS ------------------------------
# Callback for when the Extract button has been pressed:
@app.callback(
[
Output('output-state-extract', 'children'),
Output('textarea-processed-url', 'value')
],
[
Input('extract-button-state', 'n_clicks')
],
[
State('input_url', 'value'),
State('radiobuttons-parser', 'value')
]
)
def update_output_extract_button(n_clicks, url, parser):
children = ''
# Web scraping
if url is None:
return children, ''
else:
regex_url = r"https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&//=]*)"
if re.match(regex_url, url) is None:
children += '\nIncorrect url!'
return children, ''
else:
article = Article(url=url, language='en')
article.download()
article.parse()
title = article.title
article.nlp()
keywords = article.keywords
if parser=='lxml':
extracted_txt = article.text
else:
resp = requests.get(url)
html_doc = resp.text
soup = BeautifulSoup(html_doc, parser)  # use the parser selected in the UI
extracted_txt = re.sub(r'(\n| )+', ' ', soup.text)
children += f'Title: {title}, keywords: {keywords}, parser: {parser}'
return children, extracted_txt
# Callback for when the Submit button has been pressed:
@app.callback(
Output('output-state-submit', 'children'),
[
Input('submit-button-state', 'n_clicks')
],
[
State('textarea-processed-url', 'value'),
State('input-question', 'value'),
State('checkbutton', 'value')
]
)
def update_output_submit_button(n_clicks, processed_txt, question, checkbutton_value):
# Question answering:
if n_clicks >= 1:
if len(processed_txt) == 0:
return 'Please enter a proper URL.'
elif question is None:
return 'Please enter a question.'
else:
answer, paragraph, prob = question_answering(
query=question, text=processed_txt, filter_numbers=checkbutton_value)
children = [
html.P(f'Asked question: "{question}"'),
html.P(f'Answer: {answer}'),
html.P(f'Paragraph: {paragraph}'),
html.P(f'Probability: {prob}')
]
return children
else:
return ''
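# --- Bootstrap sketch (not in this module) ---
# The @app.callback decorators above reference a global `app` that is never
# created here, so it is presumably imported from elsewhere in the project.
# A minimal, assumed wiring to serve this page could look like the following
# (kept as comments because `app` must exist before the decorators run):
#
# import dash
# app = dash.Dash(__name__)
# app.layout = layout          # `layout` is defined above
# if __name__ == "__main__":
#     app.run_server(debug=True)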
|
StarcoderdataPython
|
3229157
|
"""Test cases for ansatz-related utilities."""
import unittest
from unittest import mock
import numpy as np
import numpy.testing
from zquantum.core.interfaces.ansatz_utils import (
DynamicProperty,
ansatz_property,
combine_ansatz_params,
invalidates_parametrized_circuit,
)
class PseudoAnsatz:
n_layers = ansatz_property(name="n_layers")
def __init__(self, n_layers):
self.n_layers = n_layers
self._parametrized_circuit = None
@property
def parametrized_circuit(self):
if self._parametrized_circuit is None:
self._parametrized_circuit = f"Circuit with {self.n_layers} layers"
return self._parametrized_circuit
@invalidates_parametrized_circuit
def rotate(self):
"""Mock method that "alters" some characteristics of ansatz.
Using this method should invalidate parametrized circuit.
"""
class DynamicPropertyTests(unittest.TestCase):
def test_uses_default_value_if_not_overwritten(self):
class MyCls:
x = DynamicProperty(name="x", default_value=-15)
obj = MyCls()
self.assertEqual(obj.x, -15)
def test_can_be_set_in_init(self):
class MyCls:
length = DynamicProperty(name="length")
def __init__(self, length):
self.length = length
obj = MyCls(0.5)
self.assertEqual(obj.length, 0.5)
def test_values_are_instance_dependent(self):
class MyCls:
height = DynamicProperty(name="height")
obj1 = MyCls()
obj2 = MyCls()
obj1.height = 15
obj2.height = 30
self.assertEqual(obj1.height, 15)
self.assertEqual(obj2.height, 30)
class TestAnsatzProperty(unittest.TestCase):
"""Note that we don't really need an ansatz intance, we only need to check that
_parametrized_circuit is set to None.
"""
def test_setter_resets_parametrized_circuit(self):
ansatz = PseudoAnsatz(n_layers=10)
# Trigger initial computation of parametrized circuit
self.assertEqual(ansatz.parametrized_circuit, "Circuit with 10 layers")
# Change n_layers -> check if it recalculated.
ansatz.n_layers = 20
self.assertIsNone(ansatz._parametrized_circuit)
self.assertEqual(ansatz.parametrized_circuit, "Circuit with 20 layers")
class InvalidatesParametrizedCircuitTest(unittest.TestCase):
def test_resets_parametrized_circuit(self):
ansatz = PseudoAnsatz(n_layers=10)
# Trigger initial computation of parametrized circuit
self.assertEqual(ansatz.parametrized_circuit, "Circuit with 10 layers")
# Trigger circuit invalidation
ansatz.rotate()
self.assertIsNone(ansatz._parametrized_circuit)
def test_forwards_arguments_to_underlying_methods(self):
method_mock = mock.Mock()
decorated_method = invalidates_parametrized_circuit(method_mock)
ansatz = PseudoAnsatz(n_layers=10)
# Mock calling a regular method. Notice that we need to pass self explicitly
decorated_method(ansatz, 2.0, 1.0, x=100, label="test")
# Check that arguments were passed to underlying method
method_mock.assert_called_once_with(ansatz, 2.0, 1.0, x=100, label="test")
def test_combine_ansatz_params():
params1 = np.array([1.0, 2.0])
params2 = np.array([3.0, 4.0])
target_params = np.array([1.0, 2.0, 3.0, 4.0])
combined_params = combine_ansatz_params(params1, params2)
np.testing.assert_array_equal(combined_params, target_params)
|
StarcoderdataPython
|
271690
|
import tensorflow as tf
import numpy as np
import logging
def crop_and_concat(net1, net2):
"""
the size(net1) <= size(net2)
"""
net1_shape = net1.get_shape().as_list()
net2_shape = net2.get_shape().as_list()
# print(net1_shape)
# print(net2_shape)
# if net2_shape[1] >= net1_shape[1] and net2_shape[2] >= net1_shape[2]:
offsets = [0, (net2_shape[1] - net1_shape[1]) // 2, (net2_shape[2] - net1_shape[2]) // 2, 0]
size = [-1, net1_shape[1], net1_shape[2], -1]
net2_resize = tf.slice(net2, offsets, size)
return tf.concat([net1, net2_resize], 3)
# else:
# offsets = [0, (net1_shape[1] - net2_shape[1]) // 2, (net1_shape[2] - net2_shape[2]) // 2, 0]
# size = [-1, net2_shape[1], net2_shape[2], -1]
# net1_resize = tf.slice(net1, offsets, size)
# return tf.concat([net1_resize, net2], 3)
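# Worked example for crop_and_concat: with net1 of shape [B, 56, 56, C1] and
# net2 of shape [B, 64, 64, C2], the offsets become
#   [0, (64 - 56) // 2, (64 - 56) // 2, 0] = [0, 4, 4, 0]
# and size = [-1, 56, 56, -1], so net2 is center-cropped to 56x56 before being
# concatenated with net1 along the channel axis (output [B, 56, 56, C1 + C2]).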
def crop_only(net1, net2):
"""
the size(net1) <= size(net2)
"""
net1_shape = net1.get_shape().as_list()
net2_shape = net2.get_shape().as_list()
# print(net1_shape)
# print(net2_shape)
# if net2_shape[1] >= net1_shape[1] and net2_shape[2] >= net1_shape[2]:
offsets = [0, (net2_shape[1] - net1_shape[1]) // 2, (net2_shape[2] - net1_shape[2]) // 2, 0]
size = [-1, net1_shape[1], net1_shape[2], -1]
net2_resize = tf.slice(net2, offsets, size)
# return tf.concat([net1, net2_resize], 3)
return net2_resize
class Model:
def __init__(self, config, input_batch=None, mode='train'):
self.depths = config.depths
self.filters_root = config.filters_root
self.kernel_size = config.kernel_size
self.dilation_rate = config.dilation_rate
self.pool_size = config.pool_size
self.X_shape = config.X_shape
self.Y_shape = config.Y_shape
self.n_channel = config.n_channel
self.n_class = config.n_class
self.class_weights = config.class_weights
self.batch_size = config.batch_size
self.loss_type = config.loss_type
self.weight_decay = config.weight_decay
self.optimizer = config.optimizer
self.learning_rate = config.learning_rate
self.decay_step = config.decay_step
self.decay_rate = config.decay_rate
self.momentum = config.momentum
self.global_step = tf.compat.v1.get_variable(name="global_step", initializer=0, dtype=tf.int32)
self.summary_train = []
self.summary_valid = []
self.build(input_batch, mode=mode)
def add_placeholders(self, input_batch=None, mode="train"):
if input_batch is None:
self.X = tf.compat.v1.placeholder(dtype=tf.float32,
shape=[None, self.X_shape[0], self.X_shape[1], self.X_shape[2]], name='X')
self.Y = tf.compat.v1.placeholder(dtype=tf.float32,
shape=[None, self.Y_shape[0], self.Y_shape[1], self.n_class], name='y')
else:
self.X = input_batch[0]
if mode in ["train", "valid", "test"]:
self.Y = input_batch[1]
self.input_batch = input_batch
self.is_training = tf.compat.v1.placeholder(dtype=tf.bool, name="is_training")
# self.keep_prob = tf.placeholder(dtype=tf.float32, name="keep_prob")
self.drop_rate = tf.compat.v1.placeholder(dtype=tf.float32, name="drop_rate")
def add_prediction_op(self):
logging.info("Model: depths {depths}, filters {filters}, "
"filter size {kernel_size[0]}x{kernel_size[1]}, "
"pool size: {pool_size[0]}x{pool_size[1]}, "
"dilation rate: {dilation_rate[0]}x{dilation_rate[1]}".format(
depths=self.depths,
filters=self.filters_root,
kernel_size=self.kernel_size,
dilation_rate=self.dilation_rate,
pool_size=self.pool_size))
if self.weight_decay > 0:
weight_decay = tf.constant(self.weight_decay, dtype=tf.float32, name="weight_constant")
self.regularizer = tf.keras.regularizers.l2(l=0.5 * (weight_decay))
else:
self.regularizer = None
self.initializer = tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg",
distribution="uniform")
# down sample layers
convs = [None] * self.depths # store output of each depth
with tf.compat.v1.variable_scope("Input"):
net = self.X
net = tf.compat.v1.layers.conv2d(net,
filters=self.filters_root,
kernel_size=self.kernel_size,
activation=None,
padding='same',
dilation_rate=self.dilation_rate,
kernel_initializer=self.initializer,
kernel_regularizer=self.regularizer,
# bias_regularizer=self.regularizer,
name="input_conv")
net = tf.compat.v1.layers.batch_normalization(net,
training=self.is_training,
name="input_bn")
net = tf.nn.relu(net,
name="input_relu")
# net = tf.nn.dropout(net, self.keep_prob)
net = tf.compat.v1.layers.dropout(net,
rate=self.drop_rate,
training=self.is_training,
name="input_dropout")
for depth in range(0, self.depths):
with tf.compat.v1.variable_scope("DownConv_%d" % depth):
filters = int(2 ** (depth) * self.filters_root)
net = tf.compat.v1.layers.conv2d(net,
filters=filters,
kernel_size=self.kernel_size,
activation=None,
use_bias=False,
padding='same',
dilation_rate=self.dilation_rate,
kernel_initializer=self.initializer,
kernel_regularizer=self.regularizer,
# bias_regularizer=self.regularizer,
name="down_conv1_{}".format(depth + 1))
net = tf.compat.v1.layers.batch_normalization(net,
training=self.is_training,
name="down_bn1_{}".format(depth + 1))
net = tf.nn.relu(net,
name="down_relu1_{}".format(depth + 1))
net = tf.compat.v1.layers.dropout(net,
rate=self.drop_rate,
training=self.is_training,
name="down_dropout1_{}".format(depth + 1))
convs[depth] = net
if depth < self.depths - 1:
net = tf.compat.v1.layers.conv2d(net,
filters=filters,
kernel_size=self.kernel_size,
strides=self.pool_size,
activation=None,
use_bias=False,
padding='same',
dilation_rate=self.dilation_rate,
kernel_initializer=self.initializer,
kernel_regularizer=self.regularizer,
# bias_regularizer=self.regularizer,
name="down_conv3_{}".format(depth + 1))
net = tf.compat.v1.layers.batch_normalization(net,
training=self.is_training,
name="down_bn3_{}".format(depth + 1))
net = tf.nn.relu(net,
name="down_relu3_{}".format(depth + 1))
net = tf.compat.v1.layers.dropout(net,
rate=self.drop_rate,
training=self.is_training,
name="down_dropout3_{}".format(depth + 1))
# up layers
for depth in range(self.depths - 2, -1, -1):
with tf.compat.v1.variable_scope("UpConv_%d" % depth):
filters = int(2 ** (depth) * self.filters_root)
net = tf.compat.v1.layers.conv2d_transpose(net,
filters=filters,
kernel_size=self.kernel_size,
strides=self.pool_size,
activation=None,
use_bias=False,
padding="same",
kernel_initializer=self.initializer,
kernel_regularizer=self.regularizer,
# bias_regularizer=self.regularizer,
name="up_conv0_{}".format(depth + 1))
net = tf.compat.v1.layers.batch_normalization(net,
training=self.is_training,
name="up_bn0_{}".format(depth + 1))
net = tf.nn.relu(net,
name="up_relu0_{}".format(depth + 1))
net = tf.compat.v1.layers.dropout(net,
rate=self.drop_rate,
training=self.is_training,
name="up_dropout0_{}".format(depth + 1))
# skip connection
net = crop_and_concat(convs[depth], net)
# net = crop_only(convs[depth], net)
net = tf.compat.v1.layers.conv2d(net,
filters=filters,
kernel_size=self.kernel_size,
activation=None,
use_bias=False,
padding='same',
dilation_rate=self.dilation_rate,
kernel_initializer=self.initializer,
kernel_regularizer=self.regularizer,
# bias_regularizer=self.regularizer,
name="up_conv1_{}".format(depth + 1))
net = tf.compat.v1.layers.batch_normalization(net,
training=self.is_training,
name="up_bn1_{}".format(depth + 1))
net = tf.nn.relu(net,
name="up_relu1_{}".format(depth + 1))
net = tf.compat.v1.layers.dropout(net,
rate=self.drop_rate,
training=self.is_training,
name="up_dropout1_{}".format(depth + 1))
# Output Map
with tf.compat.v1.variable_scope("Output"):
net = tf.compat.v1.layers.conv2d(net,
filters=self.n_class,
kernel_size=(1, 1),
activation=None,
padding='same',
# dilation_rate=self.dilation_rate,
kernel_initializer=self.initializer,
kernel_regularizer=self.regularizer,
# bias_regularizer=self.regularizer,
name="output_conv")
# net = tf.nn.relu(net,
# name="output_relu")
# net = tf.layers.dropout(net,
# rate=self.drop_rate,
# training=self.is_training,
# name="output_dropout")
# net = tf.layers.batch_normalization(net,
# training=self.is_training,
# name="output_bn")
output = net
with tf.compat.v1.variable_scope("representation"):
self.representation = convs[-1]
with tf.compat.v1.variable_scope("logits"):
self.logits = output
tmp = tf.compat.v1.summary.histogram("logits", self.logits)
self.summary_train.append(tmp)
with tf.compat.v1.variable_scope("preds"):
self.preds = tf.nn.softmax(output)
tmp = tf.compat.v1.summary.histogram("preds", self.preds)
self.summary_train.append(tmp)
def add_loss_op(self):
if self.loss_type == "cross_entropy":
with tf.compat.v1.variable_scope("cross_entropy"):
flat_logits = tf.reshape(self.logits, [-1, self.n_class], name="logits")
flat_labels = tf.reshape(self.Y, [-1, self.n_class], name="labels")
if (np.array(self.class_weights) != 1).any():
class_weights = tf.constant(np.array(self.class_weights, dtype=np.float32), name="class_weights")
weight_map = tf.multiply(flat_labels, class_weights)
weight_map = tf.reduce_sum(input_tensor=weight_map, axis=1)
loss_map = tf.nn.softmax_cross_entropy_with_logits(logits=flat_logits,
labels=flat_labels)
# loss_map = tf.nn.sigmoid_cross_entropy_with_logits(logits=flat_logits,
# labels=flat_labels)
weighted_loss = tf.multiply(loss_map, weight_map)
loss = tf.reduce_mean(input_tensor=weighted_loss)
else:
loss = tf.reduce_mean(input_tensor=tf.nn.softmax_cross_entropy_with_logits(logits=flat_logits,
labels=flat_labels))
# loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=flat_logits,
# labels=flat_labels))
elif self.loss_type == "IOU":
with tf.compat.v1.variable_scope("IOU"):
eps = 1e-7
loss = 0
for i in range(1, self.n_class):
intersection = eps + tf.reduce_sum(input_tensor=self.preds[:, :, :, i] * self.Y[:, :, :, i],
axis=[1, 2])
union = eps + tf.reduce_sum(input_tensor=self.preds[:, :, :, i], axis=[1, 2]) + tf.reduce_sum(
input_tensor=self.Y[:, :, :, i], axis=[1, 2])
loss += 1 - tf.reduce_mean(input_tensor=intersection / union)
elif self.loss_type == "mean_squared":
with tf.compat.v1.variable_scope("mean_squared"):
flat_logits = tf.reshape(self.logits, [-1, self.n_class], name="logits")
flat_labels = tf.reshape(self.Y, [-1, self.n_class], name="labels")
with tf.compat.v1.variable_scope("mean_squared"):
loss = tf.compat.v1.losses.mean_squared_error(labels=flat_labels, predictions=flat_logits)
else:
raise ValueError("Unknown loss function: " % self.loss_type)
tmp = tf.compat.v1.summary.scalar("train_loss", loss)
self.summary_train.append(tmp)
tmp = tf.compat.v1.summary.scalar("valid_loss", loss)
self.summary_valid.append(tmp)
if self.weight_decay > 0:
with tf.compat.v1.name_scope('weight_loss'):
tmp = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES)
weight_loss = tf.add_n(tmp, name="weight_loss")
self.loss = loss + weight_loss
else:
self.loss = loss
def add_training_op(self):
if self.optimizer == "momentum":
self.learning_rate_node = tf.compat.v1.train.exponential_decay(learning_rate=self.learning_rate,
global_step=self.global_step,
decay_steps=self.decay_step,
decay_rate=self.decay_rate,
staircase=True)
optimizer = tf.compat.v1.train.MomentumOptimizer(learning_rate=self.learning_rate_node,
momentum=self.momentum)
elif self.optimizer == "adam":
self.learning_rate_node = tf.compat.v1.train.exponential_decay(learning_rate=self.learning_rate,
global_step=self.global_step,
decay_steps=self.decay_step,
decay_rate=self.decay_rate,
staircase=True)
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=self.learning_rate_node)
update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
self.train_op = optimizer.minimize(self.loss, global_step=self.global_step)
tmp = tf.compat.v1.summary.scalar("learning_rate", self.learning_rate_node)
self.summary_train.append(tmp)
def add_metrics_op(self):
with tf.compat.v1.variable_scope("metrics"):
Y = tf.argmax(input=self.Y, axis=-1)
confusion_matrix = tf.cast(tf.math.confusion_matrix(
labels=tf.reshape(Y, [-1]),
predictions=tf.reshape(self.preds, [-1]),
num_classes=self.n_class, name='confusion_matrix'),
dtype=tf.float32)
# with tf.variable_scope("P"):
c = tf.constant(1e-7, dtype=tf.float32)
precision_P = (confusion_matrix[1, 1] + c) / (tf.reduce_sum(input_tensor=confusion_matrix[:, 1]) + c)
recall_P = (confusion_matrix[1, 1] + c) / (tf.reduce_sum(input_tensor=confusion_matrix[1, :]) + c)
f1_P = 2 * precision_P * recall_P / (precision_P + recall_P)
tmp1 = tf.compat.v1.summary.scalar("train_precision_p", precision_P)
tmp2 = tf.compat.v1.summary.scalar("train_recall_p", recall_P)
tmp3 = tf.compat.v1.summary.scalar("train_f1_p", f1_P)
self.summary_train.extend([tmp1, tmp2, tmp3])
tmp1 = tf.compat.v1.summary.scalar("valid_precision_p", precision_P)
tmp2 = tf.compat.v1.summary.scalar("valid_recall_p", recall_P)
tmp3 = tf.compat.v1.summary.scalar("valid_f1_p", f1_P)
self.summary_valid.extend([tmp1, tmp2, tmp3])
# with tf.variable_scope("S"):
precision_S = (confusion_matrix[2, 2] + c) / (tf.reduce_sum(input_tensor=confusion_matrix[:, 2]) + c)
recall_S = (confusion_matrix[2, 2] + c) / (tf.reduce_sum(input_tensor=confusion_matrix[2, :]) + c)
f1_S = 2 * precision_S * recall_S / (precision_S + recall_S)
tmp1 = tf.compat.v1.summary.scalar("train_precision_s", precision_S)
tmp2 = tf.compat.v1.summary.scalar("train_recall_s", recall_S)
tmp3 = tf.compat.v1.summary.scalar("train_f1_s", f1_S)
self.summary_train.extend([tmp1, tmp2, tmp3])
tmp1 = tf.compat.v1.summary.scalar("valid_precision_s", precision_S)
tmp2 = tf.compat.v1.summary.scalar("valid_recall_s", recall_S)
tmp3 = tf.compat.v1.summary.scalar("valid_f1_s", f1_S)
self.summary_valid.extend([tmp1, tmp2, tmp3])
self.precision = [precision_P, precision_S]
self.recall = [recall_P, recall_S]
self.f1 = [f1_P, f1_S]
def train_on_batch(self, sess, inputs_batch, labels_batch, summary_writer, drop_rate=0.0):
feed = {self.X: inputs_batch,
self.Y: labels_batch,
self.drop_rate: drop_rate,
self.is_training: True}
_, step_summary, step, loss = sess.run([self.train_op,
self.summary_train,
self.global_step,
self.loss],
feed_dict=feed)
summary_writer.add_summary(step_summary, step)
return loss
def valid_on_batch(self, sess, inputs_batch, labels_batch, summary_writer):
feed = {self.X: inputs_batch,
self.Y: labels_batch,
self.drop_rate: 0,
self.is_training: False}
step_summary, step, loss, preds = sess.run([self.summary_valid,
self.global_step,
self.loss,
self.preds],
feed_dict=feed)
summary_writer.add_summary(step_summary, step)
return loss, preds
# def train_on_batch(self, sess, summary_writer, drop_rate=0.0, raw_data=False):
# feed = {self.drop_rate: drop_rate,
# self.is_training: True}
# if raw_data:
# _, step_summary, step, loss, preds, logits, \
# X_batch, Y_batch = sess.run([self.train_op,
# self.summary_train,
# self.global_step,
# self.loss,
# self.preds,
# self.logits,
# self.X,
# self.Y],
# feed_dict=feed)
# summary_writer.add_summary(step_summary, step)
# return loss, preds, logits, X_batch, Y_batch
# else:
# _, step_summary, step, loss = sess.run([self.train_op,
# self.summary_train,
# self.global_step,
# self.loss],
# feed_dict=feed)
# summary_writer.add_summary(step_summary, step)
# return loss
def test_on_batch(self, sess, summary_writer):
feed = {self.drop_rate: 0,
self.is_training: False}
step_summary, step, loss, preds, \
X_batch, Y_batch, fname_batch, \
itp_batch, its_batch = sess.run([self.summary_valid,
self.global_step,
self.loss,
self.preds,
self.X,
self.Y,
self.input_batch[2],
self.input_batch[3],
self.input_batch[4]],
feed_dict=feed)
summary_writer.add_summary(step_summary, step)
return loss, preds, X_batch, Y_batch, fname_batch, itp_batch, its_batch
def build(self, input_batch=None, mode='train'):
self.add_placeholders(input_batch, mode)
self.add_prediction_op()
if mode in ["train", "valid", "test"]:
self.add_loss_op()
self.add_training_op()
# self.add_metrics_op()
self.summary_train = tf.compat.v1.summary.merge(self.summary_train)
self.summary_valid = tf.compat.v1.summary.merge(self.summary_valid)
return 0
|
StarcoderdataPython
|
1849851
|
<gh_stars>10-100
# Code created by <NAME> on 7 May 2018
# Kolmogorov-Smirnov test extended to two dimensions.
# References:
# [1] <NAME>. (1983). Two-dimensional goodness-of-fit testing
# in astronomy. Monthly Notices of the Royal Astronomical Society,
# 202(3), 615-627.
# [2] <NAME>., & <NAME>. (1987). A multidimensional version of
# the Kolmogorov–Smirnov test. Monthly Notices of the Royal Astronomical
# Society, 225(1), 155-170.
# [3] <NAME>., <NAME>., <NAME>., & <NAME>.
# (1992). Numerical recipes in C. Press Syndicate of the University
# of Cambridge, New York, 24, 78.
import sys
import inspect
import numpy as np
import scipy.stats
import scipy.integrate
def CountQuads(Arr2D, point):
""" Computes the probabilities of finding points in each 4 quadrant
defined by a vertical and horizontal lines crossing the point, by counting
the proportion of points in Arr2D in each quadrant.
:param list Arr2D: Array of points to be counted.
:param array point: A 2 element list, point, which is the center of
4 square quadrants.
:returns: a tuple of 4 floats. The probabilities of finding a point in
each quadrants, with point as the origin. p stands for positive, n for
negative, with the first and second positions meaning the x and y
directions respectively.
"""
if isinstance(point, list):
point = np.asarray((np.ravel(point)))
elif type(point).__module__+type(point).__name__ == 'numpyndarray':
point = np.ravel(point.copy())
else:
raise TypeError('Input point is neither list nor numpyndarray')
if len(point) != 2:
return
if isinstance(Arr2D, list):
Arr2D = np.asarray((Arr2D))
elif type(Arr2D).__module__+type(Arr2D).__name__ == 'numpyndarray':
pass
else:
raise TypeError('Input Arr2D is neither list nor numpyndarray')
if Arr2D.shape[1] > Arr2D.shape[0]: # Reshape to A[row,column]
Arr2D = Arr2D.copy().T
if Arr2D.shape[1] != 2:
raise TypeError('Input Arr2D is not 2D')
# The pp of Qpp refer to p for 'positive' and n for 'negative' quadrants.
# In order. first subscript is x, second is y.
Qpp = Arr2D[(Arr2D[:, 0] > point[0]) & (Arr2D[:, 1] > point[1]), :]
Qnp = Arr2D[(Arr2D[:, 0] < point[0]) & (Arr2D[:, 1] > point[1]), :]
Qpn = Arr2D[(Arr2D[:, 0] > point[0]) & (Arr2D[:, 1] < point[1]), :]
Qnn = Arr2D[(Arr2D[:, 0] < point[0]) & (Arr2D[:, 1] < point[1]), :]
# Normalized fractions:
ff = 1./len(Arr2D)
fpp = len(Qpp)*ff
fnp = len(Qnp)*ff
fpn = len(Qpn)*ff
fnn = len(Qnn)*ff
# NOTE: all the f's are supposed to sum to 1.0. Due to floating-point
# representation the sum is SOMETIMES 1.000000002 or similar. I don't know
# how to test for that reliably, OR what to do about it yet. Keep it in mind.
return(fpp, fnp, fpn, fnn)
def FuncQuads(func2D, point, xlim, ylim, rounddig=4):
""" Computes the probabilities of finding points in each 4 quadrant
defined by a vertical and horizontal lines crossing the point, by
integrating the density function func2D in each quadrant.
:param array func2D: Density function that takes 2 arguments: x and y.
:param list point: A 2 element list, point, which is the center of 4
square quadrants.
:param array xlim,ylim: Domain of numerical integration necessary to
compute the quadrant probabilities.
:returns: a tuple of 4 floats. The probabilities of finding a point in
each quadrants, with point as the origin. p stands for positive,
n for negative, with the first and second positions meaning the x and y
directions respectively.
"""
if callable(func2D):
if len(inspect.getfullargspec(func2D)[0]) != 2:
raise TypeError('Input func2D is not a function with 2 arguments')
pass
else:
raise TypeError('Input func2D is not a function')
# If xlim, ylim and point are not lists or ndarray, exit.
if isinstance(point, list):
point = np.asarray((np.ravel(point)))
elif type(point).__module__+type(point).__name__ == 'numpyndarray':
point = np.ravel(point.copy())
else:
raise TypeError('Input point is not a list or numpyndarray')
if len(point) != 2:
raise TypeError('Input point has not exactly 2 elements')
if isinstance(xlim, list):
xlim = np.asarray((np.sort(np.ravel(xlim))))
elif type(xlim).__module__+type(xlim).__name__ == 'numpyndarray':
xlim = np.sort(np.ravel(xlim.copy()))
else:
raise TypeError('Input xlim is not a list or ndarray')
if len(xlim) != 2:
raise TypeError('Input xlim has not exactly 2 elements')
if xlim[0] == xlim[1]:
raise TypeError('Input xlim[0] should be different to xlim[1]')
if isinstance(ylim, list):
ylim = np.asarray((np.sort(np.ravel(ylim))))
elif type(ylim).__module__+type(ylim).__name__ == 'numpyndarray':
ylim = np.sort(np.ravel(ylim.copy()))
else:
raise TypeError('Input ylim is not a list or ndarray')
if len(ylim) != 2:
raise TypeError('Input ylim has not exactly 2 elements')
if ylim[0] == ylim[1]:
raise TypeError('Input ylim[0] should be different to ylim[1]')
# Numerical integration to find the quadrant probabilities.
totInt = scipy.integrate.dblquad(func2D, *xlim,
lambda x: np.amin(ylim),
lambda x: np.amax(ylim))[0]
Qpp = scipy.integrate.dblquad(func2D, point[0], np.amax(xlim),
lambda x: point[1],
lambda x: np.amax(ylim))[0]
Qpn = scipy.integrate.dblquad(func2D, point[0], np.amax(xlim),
lambda x: np.amin(ylim),
lambda x: point[1])[0]
Qnp = scipy.integrate.dblquad(func2D, np.amin(xlim), point[0],
lambda x: point[1],
lambda x: np.amax(ylim))[0]
Qnn = scipy.integrate.dblquad(func2D, np.amin(xlim), point[0],
lambda x: np.amin(ylim),
lambda x: point[1])[0]
fpp = round(Qpp/totInt, rounddig)
fnp = round(Qnp/totInt, rounddig)
fpn = round(Qpn/totInt, rounddig)
fnn = round(Qnn/totInt, rounddig)
return(fpp, fnp, fpn, fnn)
def Qks(alam, iter=100, prec=1e-17):
""" Computes the value of the KS probability function, as a function of
    alam, the D statistic. From *Numerical recipes in C*, page 623: '[what makes]
    the K–S statistic useful is that its distribution in the case of the null
hypothesis (data sets drawn from the same distribution) can be calculated,
at least to useful approximation, thus giving the significance of any
observed nonzero value of D.' (D being the KS statistic).
:param float alam: D statistic.
    :param int iter: Maximum number of iterations to be performed. On
    non-convergence, 1.0 is returned.
    :param float prec: Convergence criterion of qks. Iteration stops once
    this precision is attained.
:returns: a float. The significance level of the observed D statistic.
"""
    # The series below is summed until either `iter` iterations have been
    # performed (non-convergence) or the last term drops below twice `prec`.
if isinstance(alam, int) | isinstance(alam, float):
pass
else:
raise TypeError('Input alam is neither int nor float')
toadd = [1]
qks = 0.
j = 1
while (j < iter) & (abs(toadd[-1]) > prec*2):
toadd.append(2.*(-1.)**(j-1.)*np.exp(-2.*j**2.*alam**2.))
qks += toadd[-1]
j += 1
if (j == iter) | (qks > 1): # If no convergence after j iter, return 1.0
return(1.0)
if qks < prec:
return(0.)
else:
return(qks)
def ks2d2s(Arr2D1, Arr2D2):
""" ks stands for Kolmogorov-Smirnov, 2d for 2 dimensional,
2s for 2 samples.
KS test for goodness-of-fit on two 2D samples. Tests the hypothesis that
the two samples are from the same distribution.
:param array Arr2D1: 2D array of points/samples.
:param array Arr2D2: 2D array of points/samples.
    :returns: a tuple of two floats. First, the two-sample K-S statistic *d*;
    if *d* exceeds the critical value for the chosen significance level, the
    hypothesis that both samples come from the same distribution is rejected.
    Second, the significance level (p-value) of *d*; small values of prob
    indicate that the two samples are significantly different.
"""
if type(Arr2D1).__module__+type(Arr2D1).__name__ == 'numpyndarray':
pass
else:
        raise TypeError('Input Arr2D1 is not a numpy ndarray')
if Arr2D1.shape[1] > Arr2D1.shape[0]:
Arr2D1 = Arr2D1.copy().T
if type(Arr2D2).__module__+type(Arr2D2).__name__ == 'numpyndarray':
pass
else:
        raise TypeError('Input Arr2D2 is not a numpy ndarray')
if Arr2D2.shape[1] > Arr2D2.shape[0]:
Arr2D2 = Arr2D2.copy().T
if Arr2D1.shape[1] != 2:
raise TypeError('Input Arr2D1 is not 2D')
if Arr2D2.shape[1] != 2:
raise TypeError('Input Arr2D2 is not 2D')
d1, d2 = 0., 0.
for point1 in Arr2D1:
fpp1, fmp1, fpm1, fmm1 = CountQuads(Arr2D1, point1)
fpp2, fmp2, fpm2, fmm2 = CountQuads(Arr2D2, point1)
d1 = max(d1, abs(fpp1-fpp2))
d1 = max(d1, abs(fpm1-fpm2))
d1 = max(d1, abs(fmp1-fmp2))
d1 = max(d1, abs(fmm1-fmm2))
for point2 in Arr2D2:
fpp1, fmp1, fpm1, fmm1 = CountQuads(Arr2D1, point2)
fpp2, fmp2, fpm2, fmm2 = CountQuads(Arr2D2, point2)
d2 = max(d2, abs(fpp1-fpp2))
d2 = max(d2, abs(fpm1-fpm2))
d2 = max(d2, abs(fmp1-fmp2))
d2 = max(d2, abs(fmm1-fmm2))
d = (d1+d2)/2.
sqen = np.sqrt(len(Arr2D1)*len(Arr2D2)/(len(Arr2D1)+len(Arr2D2)))
R1 = scipy.stats.pearsonr(Arr2D1[:, 0], Arr2D1[:, 1])[0]
R2 = scipy.stats.pearsonr(Arr2D2[:, 0], Arr2D2[:, 1])[0]
RR = np.sqrt(1.-(R1*R1+R2*R2)/2.)
prob = Qks(d*sqen/(1.+RR*(0.25-0.75/sqen)))
    # Small values of prob show that the two samples are significantly
    # different. Prob is the significance level of an observed value of d,
    # NOT the same as the significance level that you set and compare to D.
return(d, prob)
def ks2d1s(Arr2D, func2D, xlim=None, ylim=None):
""" ks stands for Kolmogorov-Smirnov, 2d for 2 dimensional,
1s for 1 sample.
KS test for goodness-of-fit on one 2D sample and one 2D density
distribution. Tests the hypothesis that the data was generated
from the density distribution.
:param array Arr2D: 2D array of points/samples.
:param func2D: Density distribution. Could implement a function for
arrays in the future...
    :param array xlim, ylim: Defines the domain for the numerical integration
    necessary to compute the quadrant probabilities. If omitted, the data
    range extended by 10% on each side is used.
    :returns: tuple of two floats. First, the K-S statistic *d*; if *d*
    exceeds the critical value for the chosen significance level, the
    hypothesis that the sample was drawn from func2D is rejected. Second,
    the significance level (p-value) of *d*; small values of prob indicate
    that the sample differs significantly from the distribution.
"""
if callable(func2D):
if len(inspect.getfullargspec(func2D)[0]) != 2:
raise TypeError('Input func2D is not a function with 2 input arguments')
pass
else:
raise TypeError('Input func2D is not a function')
if type(Arr2D).__module__+type(Arr2D).__name__ == 'numpyndarray':
pass
else:
        raise TypeError('Input Arr2D is not a numpy ndarray')
print(Arr2D.shape)
if Arr2D.shape[1] > Arr2D.shape[0]:
Arr2D = Arr2D.copy().T
if Arr2D.shape[1] != 2:
raise TypeError('Input Arr2D is not 2D')
    # Expand the integration domain 10% beyond the data range on each side
    # when no limits are given.
    if xlim is None or len(xlim) == 0:
        xspan = abs(np.amax(Arr2D[:, 0]) - np.amin(Arr2D[:, 0]))
        xlim = [np.amin(Arr2D[:, 0]) - xspan / 10,
                np.amax(Arr2D[:, 0]) + xspan / 10]
    if ylim is None or len(ylim) == 0:
        yspan = abs(np.amax(Arr2D[:, 1]) - np.amin(Arr2D[:, 1]))
        ylim = [np.amin(Arr2D[:, 1]) - yspan / 10,
                np.amax(Arr2D[:, 1]) + yspan / 10]
d = 0
for point in Arr2D:
fpp1, fmp1, fpm1, fmm1 = FuncQuads(func2D, point, xlim, ylim)
fpp2, fmp2, fpm2, fmm2 = CountQuads(Arr2D, point)
d = max(d, abs(fpp1-fpp2))
d = max(d, abs(fpm1-fpm2))
d = max(d, abs(fmp1-fmp2))
d = max(d, abs(fmm1-fmm2))
sqen = np.sqrt(len(Arr2D))
R1 = scipy.stats.pearsonr(Arr2D[:, 0], Arr2D[:, 1])[0]
RR = np.sqrt(1.0-R1**2)
prob = Qks(d*sqen/(1.+RR*(0.25-0.75/sqen)))
return d, prob
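if __name__ == '__main__':
    # Illustrative self-check, not part of the original module: compare two
    # Gaussian samples with ks2d2s and a small sample against its generating
    # density with ks2d1s. It assumes numpy/scipy/inspect are imported at
    # module level, as the functions above already require. The ks2d1s sample
    # is kept small because every point triggers four numerical double
    # integrations.
    rng = np.random.RandomState(0)
    sample1 = rng.normal(0.0, 1.0, size=(200, 2))
    sample2 = rng.normal(0.5, 1.0, size=(200, 2))
    d2, prob2 = ks2d2s(sample1, sample2)
    print('two-sample test: d=%.3f, prob=%.3g' % (d2, prob2))

    def gauss2d(x, y):
        # unit bivariate normal density
        return np.exp(-(x ** 2 + y ** 2) / 2.) / (2. * np.pi)

    d1, prob1 = ks2d1s(sample1[:50], gauss2d, xlim=[-4., 4.], ylim=[-4., 4.])
    print('one-sample test: d=%.3f, prob=%.3g' % (d1, prob1))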
|
StarcoderdataPython
|
31780
|
<reponame>ramoslin02/binanceapi<filename>binanceapi/constant.py
from enum import Enum
class OrderStatus(object):
"""
Order Status
"""
NEW = "NEW"
PARTIALLY_FILLED = "PARTIALLY_FILLED"
FILLED = "FILLED"
CANCELED = "CANCELED"
PENDING_CANCEL = "PENDING_CANCEL"
REJECTED = "REJECTED"
EXPIRED = "EXPIRED"
class OrderType(Enum):
"""
Order type
"""
LIMIT = "LIMIT"
MARKET = "MARKET"
STOP = "STOP"
class RequestMethod(Enum):
"""
Request methods
"""
GET = 'GET'
POST = 'POST'
PUT = 'PUT'
DELETE = 'DELETE'
class Interval(Enum):
"""
Interval for klines
"""
MINUTE_1 = '1m'
MINUTE_3 = '3m'
MINUTE_5 = '5m'
MINUTE_15 = '15m'
MINUTE_30 = '30m'
HOUR_1 = '1h'
HOUR_2 = '2h'
HOUR_4 = '4h'
HOUR_6 = '6h'
HOUR_8 = '8h'
HOUR_12 = '12h'
DAY_1 = '1d'
DAY_3 = '3d'
WEEK_1 = '1w'
MONTH_1 = '1M'
class OrderSide(Enum):
"""
order side
"""
BUY = "BUY"
SELL = "SELL"
|
StarcoderdataPython
|
8045031
|
'''Application factory for the Flask app.'''
from flask import Flask
from flask_login import LoginManager
from flask_mongoengine import MongoEngine
from config import Config
db = MongoEngine()
login_manager = LoginManager()
#login_manager.session_protection = 'strong'
#login_manager.login_view = 'auth.login'
def create_app(config=Config.DEFAULT):
    '''Create and configure the Flask application for the given config.'''
app = Flask(__name__)
app.config.from_object(config.value)
from .main import main
app.register_blueprint(main)
from .auth import auth
app.register_blueprint(auth)
db.init_app(app)
login_manager.init_app(app)
return app
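# Minimal usage sketch, not part of the original module: a separate entry
# point (e.g. a wsgi.py, whose import path "app" is an assumption made here
# for illustration) would typically do something like:
#
#     from app import create_app
#
#     application = create_app()
#     application.run(debug=True)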
|
StarcoderdataPython
|
11373940
|
"""
This file is written originally for testing the csv files directly in the test/ directory. Now as the DAG is
going to call the parser and will create tables in the tables in the database, this file will be only for a
reference on how to use Airflow DAGs and self-defined operators
"""
from datetime import datetime
from airflow import DAG
from airflow.contrib.sensors.file_sensor import FileSensor
from airflow.operators.dummy_operator import DummyOperator
from airflow.utils.trigger_rule import TriggerRule
from operators.my_operators import StartOperator, DirectorySensor, ScriptParser
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__version__ = '0.1'
__status__ = 'Development'
dag = DAG('my_operator_tester_dag',
description="'Testing custom operators",
schedule_interval='0 12 * * *',
start_date=datetime(2017, 3, 20), catchup=False)
starter = StartOperator(task_id="start_operator", dag=dag)
sensor = DirectorySensor(directory="/usr/local/airflow/test", task_id="directory_sensor", dag=dag)
parser = ScriptParser(directory="/usr/local/airflow/test", task_id="script_parser", dag=dag)
failed = DummyOperator(task_id="failing_state", trigger_rule=TriggerRule.ONE_FAILED, dag=dag)
success = DummyOperator(task_id="success_state", trigger_rule=TriggerRule.ALL_SUCCESS, dag=dag)
done = DummyOperator(task_id="finish_state", trigger_rule=TriggerRule.ALL_DONE, dag=dag)
starter >> sensor >> parser >> (failed, success) >> done
|
StarcoderdataPython
|
8085002
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import flwr as fl
class AggregateCustomMetricStrategy(fl.server.strategy.FedAvg):
def aggregate_evaluate(
self,
rnd: int,
results,
failures,
):
"""Aggregate evaluation losses using weighted average."""
if not results:
return None
        # Weight each client's MSE by the number of examples it evaluated
        mse = [r.metrics["mse"] * r.num_examples for _, r in results]
        examples = [r.num_examples for _, r in results]
        # Aggregate and print the custom metric
        mse_aggregated = sum(mse) / sum(examples)
        print(f"Round {rnd} MSE aggregated from client results: {mse_aggregated}")
# Call aggregate_evaluate from base class (FedAvg)
return super().aggregate_evaluate(rnd, results, failures)
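if __name__ == "__main__":
    # Illustrative sketch only, not part of the original module: start a
    # Flower server that uses the custom strategy above. The exact
    # start_server signature differs between flwr releases (the `rnd`
    # argument of aggregate_evaluate suggests an older 0.x release, which
    # accepted a plain dict for `config`); adjust to the installed version.
    fl.server.start_server(
        server_address="[::]:8080",
        config={"num_rounds": 3},
        strategy=AggregateCustomMetricStrategy(),
    )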
|
StarcoderdataPython
|
9672478
|
from rest_framework.response import Response
import ippon.models.tournament as tm
def has_tournament_authorization(allowed_master_statuses, pk, request):
try:
if not isinstance(allowed_master_statuses, list):
allowed_master_statuses = [allowed_master_statuses]
admin = tm.TournamentAdmin.objects.get(
user=request.user.id,
tournament=pk,
is_master__in=allowed_master_statuses)
is_admin = False
if admin is not None:
is_admin = True
return Response({
'isAuthorized': is_admin
})
except tm.TournamentAdmin.DoesNotExist:
return Response({
'isAuthorized': False
})
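# Minimal usage sketch, not part of the original module: a DRF view that
# delegates to the helper above. The view name, the URL wiring and the choice
# to allow both masters (True) and regular admins (False) are illustrative
# assumptions only.
#
#     from rest_framework.decorators import api_view
#
#     @api_view(["GET"])
#     def tournament_admin_authorization(request, pk):
#         return has_tournament_authorization([True, False], pk, request)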
|
StarcoderdataPython
|
1608136
|
# !usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
class CatalogdbError(Exception):
"""A custom core Catalogdb exception"""
def __init__(self, message=None):
message = 'There has been an error' \
if not message else message
super(CatalogdbError, self).__init__(message)
class CatalogdbNotImplemented(CatalogdbError):
"""A custom exception for not yet implemented features."""
def __init__(self, message=None):
message = 'This feature is not implemented yet.' \
if not message else message
super(CatalogdbNotImplemented, self).__init__(message)
class CatalogdbMissingDependency(CatalogdbError):
"""A custom exception for missing dependencies."""
pass
class CatalogdbWarning(Warning):
"""Base warning for Catalogdb."""
class CatalogdbUserWarning(UserWarning, CatalogdbWarning):
"""The primary warning class."""
pass
class CatalogdbDeprecationWarning(CatalogdbUserWarning):
"""A warning for deprecated features."""
pass
|
StarcoderdataPython
|
241257
|
from .toc import *
|
StarcoderdataPython
|
3261049
|
'''This test suite contains 3 passing tests. '''
import pytest
from seleniumbase import BaseCase
from basic_methods import BasicMethods as bm
import config
import constants as const
import time
class TestLogout(BaseCase):
def test_LogoutClicked_MsgUserRedirected(self):
        ''' Checks that when the user opens the user menu and presses Logout,
        they are redirected to the authorization page and a success message appears'''
self.open(config.url)
bm.log_in(self, config.valid_username, config.valid_password)
self.click_chain([const.user_menu_btn_selector,
'span:contains("Logout")'])
self.assert_element(const.toast_msg_success_selector)
        self.assertEqual(self.get_current_url(), config.url_authorization,
                         "Did not redirect")
def test_LoggedoutClickBackButton_StayOnAuthorizationPage(self):
        ''' Checks that after logging out and clicking the back button,
        the user still stays on the authorization page'''
self.test_LogoutClicked_MsgUserRedirected()
self.go_back()
self.assertEqual(self.get_current_url(),
config.url_authorization, "Redirected")
def test_LoggedinClickedLogout_ShouldBeAbleToLogInAgain(self):
        '''Checks that the user is able to log in again right after logging out'''
self.open(config.url)
bm.log_in(self, config.valid_username, config.valid_password)
bm.log_out(self)
# need to hard-code login because bm.log_in opens config.url
time.sleep(1)
self.type(const.authorization_user_selector, config.valid_username)
self.type(const.authorization_password_selector,
config.valid_password+"\n")
self.wait_for_element_visible(const.user_menu_btn_selector)
self.assertNotEqual(self.get_current_url(),
config.url_authorization, "Redirected")
|
StarcoderdataPython
|
310635
|
<reponame>Yiling-J/pharos
import yaml
from unittest import TestCase, mock
from jinja2 import PackageLoader, Environment, FileSystemLoader
from kubernetes.dynamic import exceptions as api_exceptions
from pharos import models, fields, exceptions, lookups, backend, jinja
from pharos.jinja import to_yaml
from pharos.backend import TemplateBackend
class BaseCase(TestCase):
def setUp(self):
self.dynamic_client = mock.Mock()
self.client = mock.Mock()
self.client.settings.enable_chunk = True
self.client.settings.chunk_size = 100
self.client.settings.jinja_loader = PackageLoader("tests", "./")
self.client.settings.template_engine = "pharos.jinja.JinjaEngine"
self.client.dynamic_client = self.dynamic_client
class DeploymentTestCase(BaseCase):
def test_no_client(self):
with self.assertRaises(exceptions.ClientNotSet):
len(models.Deployment.objects.all())
def test_chunk_iterator(self):
mock_response = mock.Mock()
response_lambda = lambda token: {
"metadata": {"continue": token},
"items": [
{
"id": token,
"metadata": {
"ownerReferences": [{"kind": "Apple", "uid": "123"}],
"name": "test",
},
}
],
}
# should call 6 times, and get END signal, so 7 won't be called
mock_response.to_dict.side_effect = [
response_lambda(f"{i}") for i in [1, 2, 3, 4, 5, "END", 7]
]
self.dynamic_client.resources.get.return_value.get.return_value = mock_response
query = models.Deployment.objects.using(self.client).all()
self.assertEqual(len(query), 6)
expected_call = [
mock.call.get(_continue=None, limit=100),
mock.call.get(_continue="1", limit=100),
mock.call.get(_continue="2", limit=100),
mock.call.get(_continue="3", limit=100),
mock.call.get(_continue="4", limit=100),
mock.call.get(_continue="5", limit=100),
]
self.assertEqual(
self.dynamic_client.resources.get.return_value.method_calls, expected_call
)
def test_limit_with_iterator(self):
mock_response = mock.Mock()
response_lambda = lambda token: {
"metadata": {"continue": token},
"items": [
{
"id": token,
"metadata": {
"ownerReferences": [{"kind": "Apple", "uid": "123"}],
"name": "test",
},
}
],
}
# should call 3 times only
mock_response.to_dict.side_effect = [
response_lambda(f"{i}") for i in [1, 2, 3, 4, 5, "END", 7]
]
self.dynamic_client.resources.get.return_value.get.return_value = mock_response
query = models.Deployment.objects.using(self.client).limit(3)
self.assertEqual(len(query), 3)
expected_call = [
mock.call.get(_continue=None, limit=100),
mock.call.get(_continue="1", limit=100),
mock.call.get(_continue="2", limit=100),
]
self.assertEqual(
self.dynamic_client.resources.get.return_value.method_calls, expected_call
)
def test_deployment_query_basic(self):
test_cases = [
{
"query": models.Deployment.objects.using(self.client).all(),
"api_call": {},
},
{
"query": models.Deployment.objects.using(self.client).filter(
name="apple"
),
"api_call": {
"name": "apple",
},
},
{
"query": models.Deployment.objects.using(self.client).filter(
name="apple", namespace="orange"
),
"api_call": {
"name": "apple",
"namespace": "orange",
},
},
{
"query": models.Deployment.objects.using(self.client)
.filter(name="apple")
.filter(namespace="orange"),
"api_call": {
"name": "apple",
"namespace": "orange",
},
},
{
"query": models.Deployment.objects.using(self.client).filter(
selector="app in (a)"
),
"api_call": {
"label_selector": "app in (a)",
},
},
{
"query": models.Deployment.objects.using(self.client)
.filter(selector="app in (a)")
.filter(selector="app=b"),
"api_call": {
"label_selector": "app in (a),app=b",
},
},
{
"query": models.Deployment.objects.using(self.client).filter(
field_selector="name=foo"
),
"api_call": {
"field_selector": "name=foo",
},
},
{
"query": models.Deployment.objects.using(self.client)
.filter(field_selector="name=foo")
.filter(field_selector="type=bar"),
"api_call": {
"field_selector": "name=foo,type=bar",
},
},
]
self.dynamic_client.resources.get.return_value.get.return_value.to_dict.side_effect = lambda: {
"metadata": {},
"items": ["test"],
}
for case in test_cases:
with self.subTest(case=case):
len(case["query"])
self.assertEqual(
self.dynamic_client.resources.method_calls,
[mock.call.get(api_version="v1", kind="Deployment")],
)
self.assertEqual(
self.dynamic_client.resources.get.return_value.method_calls,
[mock.call.get(**case["api_call"], _continue=None, limit=100)],
)
self.dynamic_client.reset_mock()
models.Deployment.objects.using(self.client).get(
name="apple", namespace="orange"
)
self.assertEqual(
self.dynamic_client.resources.get.return_value.method_calls,
[
mock.call.get(
name="apple", namespace="orange", _continue=None, limit=100
)
],
)
def test_owner(self):
mock_data = {"kind": "Apple", "metadata": {"uid": "123"}}
mock_owner = models.Deployment(client=None, k8s_object=mock_data)
mock_response = mock.Mock()
mock_response.to_dict.side_effect = lambda: {
"metadata": {},
"items": [
{
"id": 1,
"metadata": {
"ownerReferences": [{"kind": "Apple", "uid": "123"}],
"name": "test",
},
},
{
"id": 2,
"metadata": {"ownerReferences": [{"kind": "Appl", "uid": "124"}]},
},
{
"id": 3,
"metadata": {"ownerReferences": [{"kind": "Apple", "uid": "125"}]},
},
{"id": 4, "metadata": {"ownerReferences": [{"kind": "Apple"}]}},
{
"id": 6,
"metadata": {"ownerReferences": [{"kind": "Apple", "uid": "123"}]},
},
],
}
self.dynamic_client.resources.get.return_value.get.return_value = mock_response
query = models.Deployment.objects.using(self.client).filter(owner=mock_owner)
self.assertEqual(len(query), 2)
mock_owner2 = models.Deployment(
client=None, k8s_object={"kind": "Apple", "metadata": {"uid": "124"}}
)
query = models.Deployment.objects.using(self.client).filter(
owner__in=[mock_owner, mock_owner2]
)
self.assertEqual(len(query), 3)
deployment = query[0]
self.assertEqual(deployment.name, "test")
def test_deployment_pods(self):
deployment = models.Deployment(
client=self.client,
k8s_object={
"metadata": {"uid": "123"},
"spec": {"selector": {"matchLabels": {"app": "test"}}},
},
)
mock_rs_response = mock.Mock()
mock_rs_response.to_dict.return_value = {
"metadata": {},
"items": [
{
"id": 1,
"metadata": {
"ownerReferences": [{"kind": "ReplicaSet", "uid": "123"}],
"uid": "234",
},
},
{
"id": 2,
"metadata": {
"ownerReferences": [{"kind": "ReplicaSet", "uid": "124"}],
"uid": "235",
},
},
{
"id": 3,
"metadata": {
"ownerReferences": [{"kind": "ReplicaSet", "uid": "123"}],
"uid": "236",
},
},
],
}
mock_pod_response = mock.Mock()
mock_pod_response.to_dict.return_value = {
"metadata": {},
"items": [
{
"id": 1,
"metadata": {
"ownerReferences": [{"kind": "ReplicaSet", "uid": "234"}]
},
},
{
"id": 2,
"metadata": {
"ownerReferences": [{"kind": "ReplicaSet", "uid": "235"}]
},
},
{"id": 4, "metadata": {"ownerReferences": [{"kind": "ReplicaSet"}]}},
],
}
        # pods come first because the owner filter is a POST-type lookup
self.dynamic_client.resources.get.return_value.get.side_effect = [
mock_pod_response,
mock_rs_response,
]
self.assertEqual(len(deployment.pods.all()), 1)
def test_refresh(self):
deployment = models.Deployment(
client=self.client,
k8s_object={
"metadata": {"uid": "123", "name": "foo"},
"spec": {"selector": {"matchLabels": {"app": "test"}}},
},
)
self.assertEqual(deployment.name, "foo")
mock_response = mock.Mock()
mock_response.to_dict.side_effect = lambda: {"metadata": {"name": "bar"}}
self.dynamic_client.resources.get.return_value.get.return_value = mock_response
deployment.refresh()
self.assertEqual(deployment.name, "bar")
def test_delete(self):
deployment = models.Deployment(
client=self.client,
k8s_object={
"metadata": {
"name": "nginx-deployment",
"annotations": {
"deployment.kubernetes.io/revision": "1",
"pharos.py/template": "test.yaml",
"pharos.py/variable": "deployment-nginx-deployment-default",
},
"spec": {"selector": {"matchLabels": {"app": "test"}}},
}
},
)
mock_response = {
"metadata": {
"name": "nginx-deployment",
"namespace": "default",
"annotations": {
"deployment.kubernetes.io/revision": "1",
"pharos.py/template": "test.yaml",
"pharos.py/variable": "deployment-nginx-deployment-default",
},
},
"json": {"label_name": "foo"},
}
self.dynamic_client.resources.get.return_value.get.return_value.to_dict.return_value = (
mock_response
)
deployment.delete()
self.assertSequenceEqual(
self.dynamic_client.resources.method_calls,
[
mock.call.get(api_version="v1", kind="Deployment"),
mock.call.get(api_version="pharos.py/v1", kind="Variable"),
mock.call.get(api_version="v1", kind="Deployment"),
],
)
self.assertSequenceEqual(
self.dynamic_client.resources.get.return_value.method_calls,
[
mock.call.get(name="nginx-deployment", namespace="default"),
mock.call.delete("deployment-nginx-deployment-default", None),
mock.call.delete("nginx-deployment", "default"),
],
)
def test_create_deployment_wrong_resource(self):
mock_response = {
"metadata": {
"name": "foobar",
"namespace": "default",
"annotations": {"pharos.py/template": "test.yaml"},
}
}
self.dynamic_client.resources.get.return_value.create.return_value.to_dict.return_value = (
mock_response
)
with self.assertRaises(exceptions.ResourceNotMatch):
models.Service.objects.using(self.client).create(
"test.yaml", {"label_name": "foo"}
)
class ServicePodsTestCase(BaseCase):
def test_service_pods(self):
service = models.Service(
client=self.client,
k8s_object={
"metadata": {"uid": "123"},
"spec": {"selector": {"foo": "bar"}},
},
)
mock_rs_response = mock.Mock()
mock_rs_response.to_dict.return_value = {}
self.dynamic_client.resources.get.return_value.get.return_value = (
mock_rs_response
)
len(service.pods.all())
self.assertEqual(
self.dynamic_client.resources.get.return_value.method_calls,
[mock.call.get(_continue=None, label_selector="foo=bar", limit=100, namespace=None)],
)
class CustomLookup(lookups.Lookup):
name = "foo"
type = lookups.Lookup.POST
def validate(self, obj, data):
return True
fields.JsonPathField.add_lookup(CustomLookup)
class CustomModel(models.Model):
id = fields.JsonPathField(path="id")
task = fields.JsonPathField(path="job.task")
class Meta:
api_version = "v1"
kind = "CustomModel"
class CustomModelTestCase(BaseCase):
def test_custom_model(self):
mock_data = {
"kind": "CustomModel",
"job": {"task": "task1"},
"metadata": {"name": "custom", "namespace": "default"},
}
mock_obj = CustomModel(client=None, k8s_object=mock_data)
self.assertEqual(mock_obj.task, "task1")
self.assertEqual(mock_obj.name, "custom")
self.assertEqual(mock_obj.namespace, "default")
def test_custom_filed_filter(self):
mock_response = mock.Mock()
mock_response.to_dict.side_effect = lambda: {
"metadata": {},
"items": [
{"id": 1, "job": {"task": "task1"}},
{"id": 2, "job": {"task": "task2"}},
{"id": 3, "job": {"task": "task3"}},
],
}
self.dynamic_client.resources.get.return_value.get.return_value = mock_response
queryset = CustomModel.objects.using(self.client).filter(task="task3")
self.assertEqual(len(queryset), 1)
self.assertEqual(queryset[0].task, "task3")
queryset = CustomModel.objects.using(self.client).filter(
task__in=["task1", "task3"]
)
self.assertEqual(len(queryset), 2)
self.assertEqual(queryset[0].task, "task1")
self.assertEqual(queryset[1].task, "task3")
def test_custom_lookup(self):
mock_response = mock.Mock()
mock_response.to_dict.side_effect = lambda: {
"metadata": {},
"items": [{"id": 1, "job": {"task": "task1"}}],
}
self.dynamic_client.resources.get.return_value.get.return_value = mock_response
queryset = CustomModel.objects.using(self.client).filter(task__foo="task3")
self.assertEqual(len(queryset), 1)
def test_contains(self):
mock_response = mock.Mock()
mock_response.to_dict.side_effect = lambda: {
"metadata": {},
"items": [
{"id": 1, "job": {"task": "foo"}},
{"id": 2, "job": {"task": "bar"}},
{"id": 3, "job": {"task": "barfoobar"}},
],
}
self.dynamic_client.resources.get.return_value.get.return_value = mock_response
queryset = CustomModel.objects.using(self.client).filter(task__contains="foo")
self.assertEqual(len(queryset), 2)
def test_contains_list(self):
mock_response = mock.Mock()
mock_response.to_dict.side_effect = lambda: {
"metadata": {},
"items": [
{"id": 1, "job": {"task": ["foo"]}},
{"id": 2, "job": {"task": ["foo", "bar"]}},
{"id": 3, "job": {"task": ["foo", "bar", "new"]}},
],
}
self.dynamic_client.resources.get.return_value.get.return_value = mock_response
queryset = CustomModel.objects.using(self.client).filter(
task__contains=["foo", "new"]
)
self.assertEqual(len(queryset), 1)
self.assertEqual(queryset[0].task, ["foo", "bar", "new"])
def test_startswith(self):
mock_response = mock.Mock()
mock_response.to_dict.side_effect = lambda: {
"metadata": {},
"items": [
{"id": 1, "job": {"task": "foofoo"}},
{"id": 2, "job": {"task": "fobar"}},
{"id": 3, "job": {"task": "barfoobar"}},
],
}
self.dynamic_client.resources.get.return_value.get.return_value = mock_response
queryset = CustomModel.objects.using(self.client).filter(task__startswith="foo")
self.assertEqual(len(queryset), 1)
def test_compare(self):
mock_response = mock.Mock()
mock_response.to_dict.side_effect = lambda: {
"metadata": {},
"items": [
{"id": 1, "job": {"task": "foofoo"}},
{"id": 2, "job": {"task": "fobar"}},
{"id": 3, "job": {"task": "barfoobar"}},
],
}
self.dynamic_client.resources.get.return_value.get.return_value = mock_response
queryset = CustomModel.objects.using(self.client).filter(id__gt=1)
self.assertEqual(len(queryset), 2)
queryset = CustomModel.objects.using(self.client).filter(id__gt=2)
self.assertEqual(len(queryset), 1)
queryset = CustomModel.objects.using(self.client).filter(id__gte=2)
self.assertEqual(len(queryset), 2)
queryset = CustomModel.objects.using(self.client).filter(id__lt=4)
self.assertEqual(len(queryset), 3)
queryset = CustomModel.objects.using(self.client).filter(id__lt=1)
self.assertEqual(len(queryset), 0)
queryset = CustomModel.objects.using(self.client).filter(id__lte=1)
self.assertEqual(len(queryset), 1)
class Step:
parent = None
client = None
class GetSpec(Step):
parent = mock.call.resources
def __init__(self, api_version, kind, inherit=False):
self.api_version = api_version
self.kind = kind
self.inherit = inherit
@property
def call(self):
return self.parent.get(api_version=self.api_version, kind=self.kind)
class GetResource(Step):
parent = mock.call.resources.get()
def __init__(self, name, namespace, inherit=False, limit=False):
self.name = name
self.namespace = namespace
self.inherit = inherit
self.limit = limit
@property
def call(self):
params = {'name': self.name, 'namespace': self.namespace}
if self.limit:
params['_continue'] = None
params['limit'] = 100
return self.parent.get(**params)
class CreateResource(Step):
parent = mock.call.resources.get()
def __init__(
self,
template,
variable,
namespace="default",
inherit=False,
internal=False,
dry_run=False,
):
self.template = template
self.variable = variable
self.namespace = namespace
self.inherit = inherit
self.internal = internal
self.dry_run = dry_run
@property
def call(self):
loader = FileSystemLoader("./tests")
engine = jinja.JinjaEngine(self.client, loader=loader, internal=self.internal)
template_backend = backend.TemplateBackend()
template_backend.set_engine(engine)
body = template_backend.render(
self.namespace, self.template, self.variable, self.internal
)
params = {"body": body, "namespace": self.namespace}
if self.dry_run:
params["query_params"] = [("dryRun", "All")]
return self.parent.create(**params)
class UpdateResource(Step):
parent = mock.call.resources.get()
def __init__(
self,
template,
variable,
namespace="default",
inherit=False,
internal=False,
dry_run=False,
resource_version=None
):
self.template = template
self.variable = variable
self.namespace = namespace
self.inherit = inherit
self.internal = internal
self.dry_run = dry_run
self.resource_version = resource_version
@property
def call(self):
loader = FileSystemLoader("./tests")
engine = jinja.JinjaEngine(self.client, loader=loader, internal=self.internal)
template_backend = backend.TemplateBackend()
template_backend.set_engine(engine)
body = template_backend.render(
self.namespace, self.template, self.variable, self.internal
)
body["metadata"]["resourceVersion"] = self.resource_version
params = {"body": body, "namespace": self.namespace}
params["query_params"] = []
if self.dry_run:
params["query_params"] = [("dryRun", "All")]
return self.parent.replace(**params)
class DeleteResource(Step):
parent = mock.call.resources.get()
def __init__(self, name, namespace, inherit=False):
self.name = name
self.namespace = namespace
self.inherit = inherit
@property
def call(self):
return self.parent.delete(self.name, self.namespace)
class ToDict(Step):
parent = mock.call.resources.get().create()
def __init__(self, inherit=False):
self.inherit = inherit
@property
def call(self):
return self.parent.to_dict()
class ResourceCreateTestCase(BaseCase):
def assertQuery(self, steps, query):
expected_calls = []
for step in steps:
step.client = self.client
if step.inherit:
step.parent = expected_calls[-1]
expected_calls.append(step.call)
query()
self.assertSequenceEqual(self.dynamic_client.mock_calls, expected_calls)
def test_create_deployment(self):
mock_response = {
"metadata": {
"name": "foobar",
"namespace": "default",
"annotations": {"pharos.py/template": "test.yaml"},
}
}
self.dynamic_client.resources.get.return_value.create.return_value.to_dict.return_value = (
mock_response
)
expected_steps = [
GetSpec("v1", "Deployment"),
CreateResource("test.yaml", {"label_name": "foo"}, inherit=True),
ToDict(inherit=True),
GetSpec("apiextensions.k8s.io/v1", "CustomResourceDefinition"),
CreateResource("variable_crd.yaml", {}, inherit=True, internal=True),
ToDict(inherit=True),
GetSpec("pharos.py/v1", "Variable"),
CreateResource(
"variables.yaml",
{"name": "deployment-foobar-default", "value": {"label_name": "foo"}},
inherit=True,
internal=True,
),
ToDict(inherit=True),
]
query = lambda: models.Deployment.objects.using(self.client).create(
"test.yaml", {"label_name": "foo"}
)
self.assertQuery(expected_steps, query)
def test_create_deployment_namespace(self):
mock_response = {
"metadata": {
"name": "foobar",
"namespace": "test",
"annotations": {"pharos.py/template": "test.yaml"},
}
}
self.dynamic_client.resources.get.return_value.create.return_value.to_dict.return_value = (
mock_response
)
expected_steps = [
GetSpec("v1", "Deployment"),
CreateResource(
"test.yaml", {"label_name": "foo"}, inherit=True, namespace="test"
),
ToDict(inherit=True),
GetSpec("apiextensions.k8s.io/v1", "CustomResourceDefinition"),
CreateResource("variable_crd.yaml", {}, inherit=True, internal=True),
ToDict(inherit=True),
GetSpec("pharos.py/v1", "Variable"),
CreateResource(
"variables.yaml",
{"name": "deployment-foobar-test", "value": {"label_name": "foo"}},
inherit=True,
internal=True,
namespace="test",
),
ToDict(inherit=True),
]
query = lambda: models.Deployment.objects.using(self.client).create(
"test.yaml", {"label_name": "foo"}, namespace="test"
)
self.assertQuery(expected_steps, query)
def test_create_deployment_dry(self):
mock_response = {
"metadata": {
"name": "foobar",
"namespace": "default",
"annotations": {"pharos.py/template": "test.yaml"},
}
}
self.dynamic_client.resources.get.return_value.create.return_value.to_dict.return_value = (
mock_response
)
expected_steps = [
GetSpec("v1", "Deployment"),
CreateResource(
"test.yaml", {"label_name": "foo"}, inherit=True, dry_run=True
),
ToDict(inherit=True),
]
query = lambda: models.Deployment.objects.using(self.client).create(
"test.yaml", {"label_name": "foo"}, dry_run=True
)
self.assertQuery(expected_steps, query)
class ResourceUpdateTestCase(BaseCase):
def assertQuery(self, steps, query):
expected_calls = []
for step in steps:
step.client = self.client
if step.inherit:
step.parent = expected_calls[-1]
expected_calls.append(step.call)
query()
self.assertSequenceEqual(self.dynamic_client.mock_calls, expected_calls)
def test_sync_deployment(self):
mock_response = {
"metadata": {
"name": "foobar",
"namespace": "default",
"annotations": {"pharos.py/template": "test.yaml"},
}
}
self.dynamic_client.resources.get.return_value.create.return_value.to_dict.return_value = (
mock_response
)
self.dynamic_client.resources.get.return_value.replace.return_value.to_dict.return_value = (
mock_response
)
deployment = models.Deployment(
client=self.client,
k8s_object={
"metadata": {
"name": "nginx-deployment",
"annotations": {
"deployment.kubernetes.io/revision": "1",
},
"spec": {"selector": {"matchLabels": {"app": "test"}}},
}
},
)
query = lambda: deployment.sync("test.yaml", {"label_name": "foo"})
expected_steps = [
GetSpec("v1", "Deployment"),
GetResource('nginx-deployment', 'default', inherit=True),
ToDict(inherit=True),
GetSpec("v1", "Deployment"),
UpdateResource(
"test.yaml", {"label_name": "foo"}, inherit=True
),
ToDict(inherit=True),
GetSpec("apiextensions.k8s.io/v1", "CustomResourceDefinition"),
CreateResource("variable_crd.yaml", {}, inherit=True, internal=True),
ToDict(inherit=True),
GetSpec("pharos.py/v1", "Variable"),
DeleteResource('deployment-foobar-default', None),
GetSpec("pharos.py/v1", "Variable"),
CreateResource(
"variables.yaml",
{"name": "deployment-foobar-default", "value": {"label_name": "foo"}},
inherit=True,
internal=True,
),
ToDict(inherit=True),
]
self.assertQuery(expected_steps, query)
def test_update_deployment(self):
mock_response = {
"metadata": {
"name": "nginx-deployment",
"namespace": "default",
"annotations": {"pharos.py/template": "test.yaml"},
},
"json": {"label_name": "foo"},
}
self.dynamic_client.resources.get.return_value.get.return_value.to_dict.return_value = (
mock_response
)
self.dynamic_client.resources.get.return_value.replace.return_value.to_dict.return_value = (
mock_response
)
deployment = models.Deployment(
client=self.client,
k8s_object={
"metadata": {
"name": "nginx-deployment",
"annotations": {
"deployment.kubernetes.io/revision": "1",
"pharos.py/template": "test.yaml",
"pharos.py/variable": "deployment-nginx-deployment-default",
},
"spec": {"selector": {"matchLabels": {"app": "test"}}},
}
},
)
query = lambda: deployment.deploy()
expected_steps = [
GetSpec("v1", "Deployment"),
GetResource('nginx-deployment', 'default', inherit=True),
ToDict(inherit=True),
GetSpec("pharos.py/v1", "Variable"),
GetResource('deployment-nginx-deployment-default', 'default', inherit=True, limit=True),
ToDict(inherit=True),
GetSpec("v1", "Deployment"),
UpdateResource(
"test.yaml", {"label_name": "foo"}, inherit=True
),
ToDict(inherit=True),
GetSpec("pharos.py/v1", "Variable"),
UpdateResource(
"variables.yaml",
{"name": "deployment-nginx-deployment-default", "value": {"label_name": "foo"}},
namespace='default',
inherit=True,
internal=True,
),
ToDict(inherit=True)
]
self.assertQuery(expected_steps, query)
def test_update_deployment_dry(self):
mock_response = {
"metadata": {
"name": "nginx-deployment",
"namespace": "default",
"annotations": {"pharos.py/template": "test.yaml"},
},
"json": {"label_name": "foo"},
}
self.dynamic_client.resources.get.return_value.get.return_value.to_dict.return_value = (
mock_response
)
self.dynamic_client.resources.get.return_value.replace.return_value.to_dict.return_value = (
mock_response
)
deployment = models.Deployment(
client=self.client,
k8s_object={
"metadata": {
"name": "nginx-deployment",
"annotations": {
"deployment.kubernetes.io/revision": "1",
"pharos.py/template": "test.yaml",
"pharos.py/variable": "deployment-nginx-deployment-default",
},
"spec": {"selector": {"matchLabels": {"app": "test"}}},
}
},
)
query = lambda: deployment.deploy(dry_run=True)
expected_steps = [
GetSpec("v1", "Deployment"),
GetResource('nginx-deployment', 'default', inherit=True),
ToDict(inherit=True),
GetSpec("pharos.py/v1", "Variable"),
GetResource('deployment-nginx-deployment-default', 'default', inherit=True, limit=True),
ToDict(inherit=True),
GetSpec("v1", "Deployment"),
UpdateResource(
"test.yaml", {"label_name": "foo"}, inherit=True, dry_run=True
),
ToDict(inherit=True),
]
self.assertQuery(expected_steps, query)
def test_update_deployment_variable(self):
mock_response = {
"metadata": {
"name": "nginx-deployment",
"namespace": "default",
"annotations": {"pharos.py/template": "test.yaml"},
},
"json": {"label_name": "foo"},
}
self.dynamic_client.resources.get.return_value.get.return_value.to_dict.return_value = (
mock_response
)
self.dynamic_client.resources.get.return_value.replace.return_value.to_dict.return_value = (
mock_response
)
deployment = models.Deployment(
client=self.client,
k8s_object={
"metadata": {
"name": "nginx-deployment",
"annotations": {
"deployment.kubernetes.io/revision": "1",
"pharos.py/template": "test.yaml",
"pharos.py/variable": "deployment-nginx-deployment-default",
},
"spec": {"selector": {"matchLabels": {"app": "test"}}},
}
},
)
deployment.set_variable({"label_name": "bar"})
query = lambda: deployment.deploy()
expected_steps = [
GetSpec("v1", "Deployment"),
GetResource('nginx-deployment', 'default', inherit=True),
ToDict(inherit=True),
GetSpec("pharos.py/v1", "Variable"),
GetResource('deployment-nginx-deployment-default', 'default', inherit=True, limit=True),
ToDict(inherit=True),
GetSpec("v1", "Deployment"),
UpdateResource(
"test.yaml", {"label_name": "bar"}, inherit=True
),
ToDict(inherit=True),
GetSpec("pharos.py/v1", "Variable"),
UpdateResource(
"variables.yaml",
{"name": "deployment-nginx-deployment-default", "value": {"label_name": "bar"}},
namespace='default',
inherit=True,
internal=True,
),
ToDict(inherit=True)
]
self.assertQuery(expected_steps, query)
|
StarcoderdataPython
|
293923
|
<filename>operators/bias_operators.py
import random
import copy
import utils.constants as const
import utils.properties as props
import utils.exceptions as e
import utils.mutation_utils as mu
def operator_add_bias(model):
    if not model:
        raise ValueError("No model was provided to operator_add_bias")
    current_index = props.add_bias["current_index"]
    tmp = model.get_config()
    print("Adding bias to layer " + str(current_index))
if 'use_bias' in tmp['layers'][current_index]['config'] and not tmp['layers'][current_index]['config']['use_bias']:
tmp['layers'][current_index]['config']['use_bias'] = True
else:
raise e.AddAFMutationError(str(current_index), "Not possible to apply the add bias mutation to layer ")
model = mu.model_from_config(model, tmp)
return model
def operator_remove_bias(model):
    if not model:
        raise ValueError("No model was provided to operator_remove_bias")
    current_index = props.remove_bias["current_index"]
    tmp = model.get_config()
    print("Removing bias from layer " + str(current_index))
if 'use_bias' in tmp['layers'][current_index]['config'] and tmp['layers'][current_index]['config']['use_bias']:
tmp['layers'][current_index]['config']['use_bias'] = False
else:
        raise e.AddAFMutationError(str(current_index), "Not possible to apply the remove bias mutation to layer ")
model = mu.model_from_config(model, tmp)
return model
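# Minimal usage sketch, not part of the original module. It assumes a Keras
# model and that utils.properties has been configured to point the mutation
# at a concrete layer index; the model, the index and the Keras import are
# illustrative assumptions only.
#
#     from tensorflow.keras import layers, models
#
#     model = models.Sequential([layers.Dense(8, input_shape=(4,), use_bias=False)])
#     props.add_bias["current_index"] = 1   # index of the Dense layer in get_config()
#     mutated = operator_add_bias(model)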
|
StarcoderdataPython
|
1698263
|
<reponame>shrinandj/aim
from typing import Generic, Union, Tuple, List, TypeVar, Dict
from aim.storage.arrayview import ArrayView
from aim.storage.context import Context
from aim.storage.hashing import hash_auto
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from aim.sdk.run import Run
T = TypeVar('T')
class Sequence(Generic[T]):
"""Class representing single series of tracked value.
Objects series can be retrieved as Sequence regardless the object's type,
but subclasses of Sequence might provide additional functionality.
Provides interface to access tracked values, steps, timestamps and epochs.
Values, epochs and timestamps are accessed via :obj:`aim.storage.arrayview.ArrayView` interface.
"""
registry: Dict[str, 'Sequence'] = dict()
collections_allowed = False
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
subclass_typename = cls.sequence_name()
if subclass_typename is not None: # check for intermediate helper classes
cls.registry[subclass_typename] = cls
def __init__(
self,
name: str,
context: Context, # TODO ?dict
run: 'Run'
):
self.name = name
self.context = context
self.run = run
self._sequence_meta_tree = None
self._series_tree = run.series_run_tree.subtree((context.idx, name))
self._hash: int = None
def __repr__(self) -> str:
return f'<Metric#{hash(self)} name=`{self.name}` context=`{self.context}` run=`{self.run}`>'
@classmethod
def allowed_dtypes(cls) -> Union[str, Tuple[str, ...]]:
"""classmethod to get allowed object types for particular sequence
For example, numeric sequences a.k.a. Metric allow float and integer numbers.
The base Sequence allows any value, and to indicate that, `allowed_dtypes` returns '*'.
"""
return '*'
@classmethod
def sequence_name(cls) -> str:
"""classmethod to get retrieve sequence's registered name"""
...
def _calc_hash(self):
return hash_auto(
(self.name,
hash(self.context),
hash(self.run))
)
def __hash__(self) -> int:
if self._hash is None:
self._hash = self._calc_hash()
return self._hash
@property
def values(self) -> ArrayView:
"""Tracked values array as :obj:`ArrayView`.
:getter: Returns values ArrayView.
"""
return self._series_tree.array('val')
@property
def indices(self) -> List[int]:
"""Metric tracking steps as :obj:`list`.
:getter: Returns steps list.
"""
array_view = [i for i, _ in enumerate(self.values)]
return array_view
@property
def epochs(self) -> ArrayView:
"""Tracked epochs array as :obj:`ArrayView`.
:getter: Returns epochs ArrayView.
"""
return self._series_tree.array('epoch', dtype='int64')
@property
def timestamps(self) -> ArrayView:
"""Tracked timestamps array as :obj:`ArrayView`.
:getter: Returns timestamps ArrayView.
"""
return self._series_tree.array('time', dtype='int64')
@property
def _meta_tree(self):
if self._sequence_meta_tree is None:
self._sequence_meta_tree = self.run.meta_run_tree.subtree(('traces', self.context.idx, self.name))
return self._sequence_meta_tree
def __bool__(self) -> bool:
try:
return bool(self.values)
except ValueError:
return False
def __len__(self) -> int:
return len(self.values)
def preload(self):
self._series_tree.preload()
class MediaSequenceBase(Sequence):
"""Helper class for media sequence types."""
collections_allowed = True
def first_step(self):
"""Get sequence tracked first step.
Required to implement ranged and sliced data fetching.
"""
return self._meta_tree['first_step']
def last_step(self):
"""Get sequence tracked last step.
Required to implement ranged and sliced data fetching.
"""
return self._meta_tree['last_step']
def record_length(self):
"""Get tracked records longest list length or `None` if Text objects are tracked.
Required to implement ranged and sliced data fetching.
"""
return self._meta_tree.get('record_max_length', None)
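# Minimal usage sketch, not part of the original module: reading back a
# tracked series through the Sequence interface defined above. It assumes an
# existing `run` (aim.sdk.run.Run) that tracked a metric named "loss" in the
# empty context; both names are illustrative only.
#
#     seq = Sequence('loss', Context({}), run)
#     if seq:  # __bool__ is True only if any values were tracked
#         for step, value, epoch in zip(seq.indices, seq.values, seq.epochs):
#             print(step, value, epoch)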
|
StarcoderdataPython
|
1670268
|
<gh_stars>0
"""
@file setup.py
@date 2008-09-16
Contributors can be viewed at:
http://svn.secondlife.com/svn/linden/projects/2008/pyogp/CONTRIBUTORS.txt
$LicenseInfo:firstyear=2008&license=apachev2$
Copyright 2008, Linden Research, Inc.
Licensed under the Apache License, Version 2.0 (the "License").
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
or in
http://svn.secondlife.com/svn/linden/projects/2008/pyogp/LICENSE.txt
$/LicenseInfo$
"""
from setuptools import setup, find_packages
import os
version = '1.0'
setup(name='pyogp.lib.agentdomain',
version=version,
description="Library components implementing an Agent Domain",
long_description=open("README.txt").read() + "\n" +
open(os.path.join("docs", "HISTORY.txt")).read(),
# Get more strings from http://www.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='pyogp python awg ogp virtualworlds metaverse agentdomain',
author='Architecture Working Group',
author_email='<EMAIL>',
url='http://pyogp.net',
license='Apache License V2.0',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['pyogp', 'pyogp.lib'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
# -*- Extra requirements: -*-
'zope.component [zcml]',
'zope.interface',
'grokcore.component',
'pyogp.lib.base',
],
entry_points="""
# -*- Entry points: -*-
""",
)
|
StarcoderdataPython
|
6616746
|
# Generated by Django 2.2.2 on 2019-06-15 17:53
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Area',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('active', models.BooleanField(default=True)),
('description', models.TextField(max_length=500)),
('date', models.DateField()),
],
options={
'verbose_name': 'Area',
'verbose_name_plural': 'Areas',
},
),
]
|
StarcoderdataPython
|
11286101
|
import grpc
import numpy as np
import evocraft_ga.external.minecraft_pb2_grpc as minecraft_pb2_grpc
from evocraft_ga.external.minecraft_pb2 import * # noqa
class Spawner:
def __init__(
self,
start_x=20,
start_y=10,
start_z=20,
cube_len=10,
        class_dict=None,
orientation=SOUTH,
):
self.start_x = start_x
self.start_y = start_y
self.start_z = start_z
self.cube_len = cube_len
        # Avoid mutating a shared default dict: copy (or build) the mapping
        # before adding the AIR fallback for the -1.0 class.
        if class_dict is None:
            class_dict = {0: AIR, 1: REDSTONE_BLOCK}
        self.class_dict = dict(class_dict)
        self.class_dict[-1.0] = AIR
self.orientation = orientation
self.channel = grpc.insecure_channel("localhost:5001")
self.client = minecraft_pb2_grpc.MinecraftServiceStub(self.channel)
def create_block(self, x, y, z, block_type):
return Block(
position=Point(x=x, y=y, z=z),
type=block_type,
orientation=self.orientation,
)
def clear_blocks(self, x_min, y_min, z_min):
self.client.fillCube(
FillCubeRequest( # Clear a 20x10x20 working area
cube=Cube(
min=Point(x=x_min, y=y_min, z=z_min),
max=Point(
x=x_min + self.cube_len,
y=y_min + self.cube_len,
z=z_min + self.cube_len,
),
),
type=AIR,
)
)
def populate_arr(self, arr, x_min, y_min, z_min):
blocks = []
# self.clear_blocks(x_min,y_min,z_min)
for coord in np.ndindex(arr.shape):
block = self.create_block(
x=x_min + coord[0],
y=y_min + coord[1],
z=z_min + coord[2],
block_type=self.class_dict[arr[coord]],
)
blocks.append(block)
blocks = Blocks(blocks=blocks)
self.client.spawnBlocks(blocks)
def populate(self, cube_probs, clear_population=False):
if clear_population:
self.clear_population(len(cube_probs))
for i in range(len(cube_probs)):
dist = i * 5 + i * self.cube_len
self.populate_arr(
cube_probs[i], self.start_x + dist, self.start_y, self.start_z
)
def clear_population(self, population_size):
for i in range(population_size):
dist = i * 5 + i * self.cube_len
self.clear_blocks(self.start_x + dist, self.start_y, self.start_z)
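if __name__ == "__main__":
    # Illustrative demo only, not part of the original module. It requires an
    # Evocraft gRPC server listening on localhost:5001 (the address assumed in
    # __init__) and spawns a single random 3x3x3 cube of AIR/REDSTONE_BLOCK
    # blocks near the default start coordinates.
    demo_cube = np.random.randint(0, 2, size=(3, 3, 3))
    spawner = Spawner(cube_len=3)
    spawner.populate([demo_cube], clear_population=True)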
|
StarcoderdataPython
|
392272
|
<reponame>manulangat1/Jaza-ndai<filename>backend/blocks/fl.py
# import flask
from flask import Flask, jsonify,url_for
from twilio.twiml.voice_response import VoiceResponse
from twilio.rest import Client
import json
# Declare Flask application
from flask_web3 import current_web3, FlaskWeb3
TWILIO_ACCOUNT_SID = "ACc8e3a5361026364333bcd339433f54f9"
TWILIO_AUTH_TOKEN = "74ca46f63ad8f45bc5fbb642ad510b58"
TWILIO_NUMBER = "+1 207 477 7406"
from twilio.twiml.voice_response import VoiceResponse
from twilio.rest import Client
app = Flask(__name__)
# from ..models import Appointment
# from ..backend.models import Appointment
# Set Flask-Web3 configuration
app.config.update({'ETHEREUM_PROVIDER': 'http', 'ETHEREUM_ENDPOINT_URI': 'http://localhost:8545'})
# Declare Flask-Web3 extension
web3 = FlaskWeb3(app=app)
def load_into_db():
print("hey")
# Declare route
@app.route('/blockNumber')
def block_number():
print(current_web3.isConnected())
print(current_web3.eth.blockNumber)
accnt = current_web3.eth.accounts[0]
accnt1 = current_web3.eth.accounts[1]
print(accnt,"",accnt1)
print(web3.toWei(1, 'ether'))
print(web3.isAddress(accnt))
tx = web3.eth.sendTransaction({'to': accnt1, 'from': accnt, 'value': 12345})
# print(tx)
t = tx.decode('ISO-8859-1')
print(t)
return jsonify({'data': current_web3.eth.blockNumber})
# export const PayView = () => dispatch => {
# axios.get("/api/payment")
# .then(res => {
# dispatch({
# type:PAY,
# payload:res.data
# })
# })
# .catch(err => console.log(err))
# }
@app.route('/call',methods=['GET','POST'])
def call():
twilio_client = Client(TWILIO_ACCOUNT_SID,
TWILIO_AUTH_TOKEN)
twilio_client.calls.create(from_=TWILIO_NUMBER,
to="+254740415950",
url=url_for('.outbound',
_external=True))
return jsonify({'message': 'Call incoming!'})
@app.route('/outbound', methods=['POST'])
def outbound():
response = VoiceResponse()
response.say("Thank you for contacting our sales department. If this "
"click to call application was in production, we would "
"dial out to your sales team with the Dial verb.",
voice='alice')
response.number("+254740415950")
return str(response)
|
StarcoderdataPython
|
8098624
|
# folding.py
import openpyxl
def folding(path, rows=None, cols=None, hidden=True):
workbook = openpyxl.Workbook()
sheet = workbook.active
if rows:
begin_row, end_row = rows
sheet.row_dimensions.group(begin_row, end_row, hidden=hidden)
if cols:
begin_col, end_col = cols
sheet.column_dimensions.group(begin_col, end_col, hidden=hidden)
workbook.save(path)
if __name__ == "__main__":
folding("folded.xlsx", rows=(1, 5), cols=("C", "F"))
|
StarcoderdataPython
|
29127
|
<gh_stars>1-10
"""
Show the INI config(s) used by a command tree.
"""
from .. import command
class Show(command.Command):
""" Show current INI configuration.
Programs may make use of a configuration file which is usually located in
your $HOME directory as .<prog>_config. The file is a standard INI
style config file where each `[section]` is the full path of a command
including spaces. """
name = 'show'
def setup_args(self, parser):
self.add_argument('section', nargs='?', help='Only show config for '
'this section.')
self.add_argument('--all', '-a', action='store_true', help='Show '
'all sections')
super().setup_args(parser)
def run(self, args):
if args.section:
try:
config = {args.section: self.session.config[args.section]}
except KeyError:
raise SystemExit("Invalid section: %s" % args.section)
else:
config = self.session.config
for section, values in config.items():
if values or args.all:
print("[%s]" % section)
for k, v in values.items():
print(" %s = %s" % (k, v))
print()
class INI(command.Command):
""" INI style configuration.
Commands support user configuration in an INI style config file. """
name = 'ini'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.add_subcommand(Show, default=True)
|
StarcoderdataPython
|
1849575
|
"""<NAME>'s aospy.Proj object for collaboration w/ Natalie Burls."""
import datetime
import os
from aospy.proj import Proj
from aospy.model import Model
from aospy.run import Run
from aospy_user import regions
_ROOT = os.path.join(os.environ['HOME'], 'Dropbox/projects/gms_natalie_burls')
cam_2xco2 = Run(
name='2xco2',
description='Coupled model w/ doubled CO2',
data_direc=os.path.join(_ROOT, 'cam_output'),
data_dir_struc='one_dir',
data_dur=1,
data_start_date=datetime.datetime(700, 2, 1),
data_end_date=datetime.datetime(700, 2, 28),
data_files={name: 'abrupt2xCO2_T31_gx3v7.cam2.h0.0700-01.nc' for name in
['temp', 'precip', 'hght', 'sphum', 'vcomp', 'ps', 'bk', 'pk',
'pfull', 'phalf']}
)
cam = Model(
name='cam',
description='NCAR CAM',
grid_file_paths=os.path.join(
_ROOT,
'cam_output/abrupt2xCO2_T31_gx3v7.cam2.h0.0700-01.nc'
# 'cam_output/abrupt2xCO2_T31_gx3v7_ANN_climo.701.800.nc'
),
data_dur=1,
data_start_date=datetime.datetime(700, 1, 1),
data_end_date=datetime.datetime(800, 12, 31),
runs=[cam_2xco2],
)
burls = Proj(
'burls',
direc_out=os.path.join(_ROOT, 'aospy_output'),
models=[cam],
regions=(
regions.globe,
regions.nh,
regions.sh,
regions.tropics,
regions.wpwp,
regions.epac,
regions.sahel,
regions.sahel2,
regions.sahel3,
regions.sahara,
regions.ind_monsoon,
regions.land,
regions.ocean,
regions.trop_land,
regions.trop_ocean,
regions.sahel_south,
regions.sahel_north,
regions.sahel_east,
regions.sahel_west,
regions.east_asia_monsoon,
regions.china_east,
regions.china_west,
)
)
|
StarcoderdataPython
|
6628977
|
<filename>problems/1021-remove-outermost-parentheses.py
class Solution:
"""
    A valid parentheses string is empty (""), "(" + A + ")", or A + B, where A
    and B are valid parentheses strings and + denotes string concatenation.
    For example, "", "()", "(())()" and "(()(()))" are all valid parentheses strings.
    A non-empty valid string S is called a primitive if it cannot be split as
    S = A + B with A and B both non-empty valid parentheses strings.
    Given a non-empty valid string S, consider its primitive decomposition
    S = P_1 + P_2 + ... + P_k, where every P_i is a primitive valid parentheses string.
    Remove the outermost parentheses of every primitive in the decomposition
    of S and return the result.
    Example 1:
    Input: "(()())(())"
    Output: "()()()"
    Explanation:
    The input is "(()())(())"; its primitive decomposition is "(()())" + "(())".
    Removing the outermost parentheses of each part gives "()()" + "()" = "()()()".
    Example 2:
    Input: "(()())(())(()(()))"
    Output: "()()()()(())"
    Explanation:
    The input is "(()())(())(()(()))"; its primitive decomposition is
    "(()())" + "(())" + "(()(()))".
    Removing the outermost parentheses of each part gives
    "()()" + "()" + "()(())" = "()()()()(())".
    Example 3:
    Input: "()()"
    Output: ""
    Explanation:
    The input is "()()"; its primitive decomposition is "()" + "()".
    Removing the outermost parentheses of each part gives "" + "" = "".
"""
def removeOuterParentheses(self, s: str) -> str:
"""
        Push each character onto a stack, popping whenever a '(' meets a ')'.
        Each time the stack becomes empty, append s[start+1:end] to the result
        (which drops the outermost parentheses of that primitive).
        :param s: a non-empty valid parentheses string
        :return: s with the outermost parentheses of every primitive removed
"""
stack, result, start, end = [], [], 0, 0
for idx, c in enumerate(s):
if stack:
if stack[-1] == '(' and c == ')':
stack.pop(-1)
else:
stack.append(c)
if not stack:
end = idx
result.append(s[start+1:end])
start = idx + 1
else:
stack.append(c)
return ''.join(result)
if __name__ == '__main__':
tests = [
('(()())(())', '()()()'),
('(()())(())(()(()))', '()()()()(())')
]
for i, o in tests:
assert Solution().removeOuterParentheses(i) == o
|
StarcoderdataPython
|
9604900
|
# Generated by Django 3.0.6 on 2020-05-05 14:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0007_auto_20200505_0704'),
]
operations = [
migrations.AlterField(
model_name='config',
name='sslexpire',
field=models.DateField(default='1970-01-01'),
),
]
|
StarcoderdataPython
|
1664854
|
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from hyperion.util.integrate import integrate_subset
from scipy import stats
from .units import ConvertUnits
class MakePlots(object):
'''
    Plots the slice of the val cube from SyntheticCube that is closest
    to wav_interest, or the val image of SyntheticImage. The boundaries of
    the slice or image are plotted as well. The plot has 4 different cut
    levels: 100, 99, 95 and 90 percent.
Parameters
----------
input_array : SyntheticCube, SyntheticImage, optional
input_array also reads arrays with SyntheticCube and
SyntheticImage properties.
wav_interest : float, ``None``
* float : wavelength close to slice in microns.
* ``None`` : Only if input_array is SyntheticImage like
prefix : str
Name of the image. Default naming chain is switched off.
name : str
Name of image within the default naming chain to distinguish the
plot files. E. g. 'PSF_gaussian'
    multi_cut : ``True``, ``None``
        * ``True`` : plots the chosen image slice at cuts of [100, 99, 95, 90]%.
        * ``None`` : no multi-plot is returned.
Default is ``None``.
single_cut : float, ``None``
* float : cut level for single plot of image slice between 0 and 100.
* ``None`` : no single plot is returned.
set_cut : tuple, ``None``
* tuple : set_cut(v_min, v_max)
Minimal and Maximal physical value presented in the colorbars.
* ``None`` : no plot with minimal and maximal cut is returned.
Default is ``None``.
dpi : ``None``, scalar > 0
The resolution in dots per inch.
``None`` is default and will use the value savefig.dpi
in the matplotlibrc file.
Returns
-------
cube : SyntheticCube
3D val array with SyntheticCube properties.
image : SyntheticImage
2D val array with SyntheticImage properties.
'''
def __init__(self, input_array, wav_interest=None, prefix=None, name=None, multi_cut=None, single_cut=None, set_cut=None, dpi=None):
if multi_cut == None and single_cut == None and set_cut == None:
raise Exception('At least one plotting routine (multi_cut, single_cut or set_cut == None) has to be chosen.')
self.prefix = prefix
if self.prefix is None and name is None:
            raise Exception('If a prefix name is not given, you need to give a name to enable the default naming chain.')
if input_array.val.ndim in (2, 3):
# input_array properties
self.input_name = name
self.name = input_array.name
self.unit_out = input_array.unit_out
self.val = input_array.val
self.wav = input_array.wav
self.wav_interest = wav_interest
self.filter = input_array.filter
#print self.filter
self.grid_unit = input_array.grid_unit
self.grid_unit_name = input_array.grid_unit_name
# measure of the image
self.FOV = input_array.FOV
self.x_min = input_array.x_min
self.x_max = input_array.x_max
self.y_min = input_array.y_min
self.y_max = input_array.y_max
self.pixel = input_array.pixel
self.pixel_2D = self.pixel[0] * self.pixel[1]
#print self.val.shape
# condition to find slice close to wav_interest
if input_array.val.ndim == 3:
if self.wav_interest is None and self.filter['waf_min'] is None:
                    raise Exception('WARNING: wav_interest or waf_0 need to be defined if a 3D cube is passed.')
find_minimum = np.abs(self.wav - self.wav_interest)
num = np.arange(len(self.wav))
index = num[find_minimum == np.min(find_minimum)][0]
wav_min = 10. ** (np.log10(self.wav[index]) - input_array.spacing_wav / 2.)
wav_max = 10. ** (np.log10(self.wav[index]) + input_array.spacing_wav / 2.)
self.val_2D = self.val[:, :, index]
self.wav_real = (round(wav_min, 2), round(wav_max, 2))
# condition for image
if input_array.val.ndim == 2:
self.val_2D = self.val.copy()
self.wav_real = (round(self.filter['waf_min'], 2), round(self.filter['waf_max'], 2))
else:
            raise Exception('WARNING: MakePlots can only use SyntheticCube or SyntheticImage.')
        # create cut levels
self.val_sort = np.sort(self.val_2D.ravel())
self.xx = np.linspace(0, len(self.val_sort), len(self.val_sort))
        # statistics
self.median = stats.scoreatpercentile(self.val_sort, 50)
self.min_0 = stats.scoreatpercentile(self.val_sort, 0)
self.min_5 = stats.scoreatpercentile(self.val_sort, 5)
self.max_95 = stats.scoreatpercentile(self.val_sort, 95)
self.max_100 = stats.scoreatpercentile(self.val_sort, 100)
# grid of X, Y plot
x = np.linspace(self.x_min / self.grid_unit, self.x_max / self.grid_unit, self.pixel[0])
y = np.linspace(self.y_min / self.grid_unit, self.y_max / self.grid_unit, self.pixel[1])
X, Y = np.meshgrid(y,x)
label = 'Flux [' + self.unit_out + ']'
        # title of plot
titel = self.name + ' ' + str(self.wav_real) + ' micron'
        # plotting the multi-cut figure
if multi_cut is not None:
fig2 = plt.figure()
fig2.suptitle(titel, fontsize=10.)
fig2.subplots_adjust(hspace=0.3, wspace=0.3)
font = {'size': 6}
mpl.rc('font', **font)
a = np.array([100, 99, 95, 90])
b = np.array([1, 2])
c = np.array([1, 1])
for l in range(len(a)):
k = l + 1
ax = fig2.add_subplot(2, 2, k)
title = str(int(a[l])) + ' %'
plt.title(title)
self.percentage = a[l]
self.percent = self.percentage / 2.
lower_cut = (100 - a[l]) / 2.
upper_cut = lower_cut + a[l]
self.min = stats.scoreatpercentile(self.val_sort, lower_cut)
self.max = stats.scoreatpercentile(self.val_sort, upper_cut)
vmin = self.min
vmax = self.max
self.norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
c = ax.pcolormesh(X, Y, self.val_2D, cmap=plt.cm.gist_heat, norm=self.norm)
ax.set_xlim(x[0], x[-1])
ax.set_ylim(y[0], y[-1])
ax.set_xlabel('x [' + self.grid_unit_name + ']')
ax.set_ylabel('y [' + self.grid_unit_name + ']')
cb = fig2.colorbar(c)
cb.set_label(label)
if self.prefix is None:
self.plot_name = self.name + '_image_' + self.input_name + '_multi_cut_' + str(self.wav_real[0]) + '_' + str(self.wav_real[1]) + '.png'
if self.prefix is not None:
self.plot_name = self.prefix + '.png'
fig2.savefig(self.plot_name, bbox_inches='tight', dpi=dpi)
# single plot for certain cut if cut is not None
if single_cut is not None:
fig3 = plt.figure()
fig3.suptitle(titel, fontsize=10.)
fig3.subplots_adjust(hspace=0.3, wspace=0.3)
font = {'size': 6}
mpl.rc('font', **font)
a = np.array([single_cut])
ax = fig3.add_subplot(1, 1, 1)
title = str(int(a[0])) + ' %'
plt.title(title)
lower_cut = (100 - single_cut) / 2.
upper_cut = lower_cut + single_cut
self.min = stats.scoreatpercentile(self.val_sort, lower_cut)
self.max = stats.scoreatpercentile(self.val_sort, upper_cut)
vmin = self.min
vmax = self.max
self.norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
c = ax.pcolormesh(X, Y, self.val_2D, cmap=plt.cm.gist_heat, norm=self.norm)
ax.set_xlim(x[0], x[-1])
ax.set_ylim(y[0], y[-1])
ax.set_xlabel('x [' + self.grid_unit_name + ']')
ax.set_ylabel('y [' + self.grid_unit_name + ']')
cb = fig3.colorbar(c)
cb.set_label(label)
if self.prefix is None:
self.plot_name = self.name + '_image_' + self.input_name + '_single_cut_' + str(single_cut) + '%_' + str(self.wav_real[0]) + '_' + str(self.wav_real[1]) + '.png'
if self.prefix is not None:
self.plot_name = self.prefix + '.png'
fig3.savefig(self.plot_name, bbox_inches='tight', dpi=dpi)
        # plot with fixed vmin/vmax if set_cut is not None
if set_cut is not None:
fig4 = plt.figure()
fig4.suptitle(titel, fontsize=10.)
fig4.subplots_adjust(hspace=0.3, wspace=0.3)
font = {'size': 6}
mpl.rc('font', **font)
# min max
vmin = set_cut[0]
vmax = set_cut[1]
ax = fig4.add_subplot(1, 1, 1)
title = '[' + str("%0.2e" % vmin) + ', ' + str("%0.2e" % vmax) + ']'
title2 = 'flux values within ' + title + ' ' + self.unit_out
plt.title(title2)
self.norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
#print X.shape, Y.shape, self.val_2D.shape
c = ax.pcolormesh(X, Y, self.val_2D, cmap=plt.cm.gist_heat, norm=self.norm)
ax.set_xlim(x[0], x[-1])
ax.set_ylim(y[0], y[-1])
ax.set_xlabel('x [' + self.grid_unit_name + ']')
ax.set_ylabel('y [' + self.grid_unit_name + ']')
cb = fig4.colorbar(c)
cb.set_label(label)
if self.prefix is None:
self.plot_name = self.name + '_image_' + self.input_name + '_set_cut_' + str("%0.2e" % vmin) + '_' + str("%0.2e" % vmax) + '_' + str(self.wav_real[0]) + '_' + str(self.wav_real[1]) + '.png'
if self.prefix is not None:
self.plot_name = self.prefix + '.png'
fig4.savefig(self.plot_name, bbox_inches='tight', dpi=dpi)
def histogram_cuts(self, dpi=None):
'''
DS9 like histograms of the cuts can be checked here.
Parameters
----------
dpi : None, scalar > 0
The resolution in dots per inch.
            ``None`` is default and will use the value savefig.dpi
in the matplotlibrc file.
'''
# Histograms of cut levels
fig = plt.figure()
fig.suptitle(self.name, fontsize=10.)
fig.subplots_adjust(hspace=0.3)
ax1 = fig.add_subplot(2, 1, 1)
plt.semilogy(self.val_sort[::-1], self.xx, 'b-')
plt.semilogy([self.median, self.median], [self.xx[0] + 1., self.xx[-1]], 'r-')
# min max
plt.semilogy([self.min_0, self.min_0], [self.xx[0] + 1., self.xx[-1]], 'g-')
plt.semilogy([self.min_5, self.min_5], [self.xx[0] + 1., self.xx[-1]], 'y-')
plt.semilogy([self.max_95, self.max_95], [self.xx[0] + 1., self.xx[-1]], 'y-')
plt.semilogy([self.max_100, self.max_100], [self.xx[0] + 1., self.xx[-1]], 'g-')
ax1.set_xlabel('val distribution')
ax1.set_ylabel('Number of pixels')
ax2 = fig.add_subplot(2, 1, 2)
plt.plot(self.val_sort)
ax2.set_xlabel('Number of pixels')
ax2.set_ylabel('val distribution')
ax2.set_xlim(self.xx[0], self.xx[-1])
ax2.set_ylim(self.val_sort[0], self.val_sort[-1])
if self.prefix is None:
self.plot_name = self.name + '_image_' + self.input_name + '_' + str(self.wav_real[0]) + '_' + str(self.wav_real[1]) + '_histogram.png'
if self.prefix is not None:
self.plot_name = self.prefix + '_histogram.png'
fig.savefig(self.plot_name, bbox_inches='tight', dpi=dpi)
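# Hypothetical usage sketch (added for illustration; `cube` stands in for a
# SyntheticCube-like object produced elsewhere in the pipeline and is not
# defined in this module):
#
#     plotter = MakePlots(cube, wav_interest=1.0, prefix='my_cube', multi_cut=True)
#     plotter.histogram_cuts(dpi=300)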
|
StarcoderdataPython
|
1825396
|
<reponame>dikyindrah/Python-Pemrograman-Dasar<filename>21-Operator Perbandingan/Script.py
# Comparison operators
print('\n==========Operator Perbandingan==========\n')
x = int(input('Masukan nilai x: '))
y = int(input('Masukan nilai y: '))
print('')
print(x, '==', y, ' =', (x==y))
print(x, '!=', y, ' =', (x!=y))
print(x, '<', y, ' =', (x<y))
print(x, '>', y, ' =', (x>y))
print(x, '<=', y, ' =', (x<=y))
print(x, '>=', y, ' =', (x>=y))
print('')
|
StarcoderdataPython
|
1792348
|
from .. import testing
class CountIfTest(testing.FunctionalTestCase):
filename = "IF.xlsx"
def test_evaluation_ABCDE_1(self):
for col in "ABCDE":
cell = f'Sheet1!{col}1'
excel_value = self.evaluator.get_cell_value(cell)
value = self.evaluator.evaluate(cell)
self.assertEqual(excel_value, value)
|
StarcoderdataPython
|
179639
|
<reponame>jeremycward/ipp-core<gh_stars>1-10
import pandas as pd
def header_cols(header):
return (header.name,
header.storage_method,
header.path,
str(header.memory_style),
header.description)
def mtx_headers_as_dataframe(matrix_headers):
record_data = [header_cols(i) for i in matrix_headers]
return pd.DataFrame.from_records(data= record_data, columns= ["name", "storage method", "path", "mem style", "description"])
|
StarcoderdataPython
|
6557314
|
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from nltk import word_tokenize
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
app.layout = html.Div([
html.Label('Escribamos números en mapuzugun!',style={'color': 'black', 'fontSize': 20}),
html.Label('Dime un número entre 1 y 9999',style={'color': '#8B008B', 'fontSize': 20, 'font-weight': 'bold'}),
dcc.Input(id='my-id', value=1, type='text'),
html.Div(id='my-div',style={'color': 'black', 'fontSize': 16}),#'font-weight': 'bold'
html.H3('Si quieres cooperar con esta iniciativa escríbeme a <EMAIL>',style={'fontSize': 14})
])
numbers_1_9={'kiñe':1,'epu':2,'kvla':3,'meli':4,'kecu':5,'kayu':6,'reqle':7,'pura':8,'ayja':9}
numbers_10_1000={'mari':10,'pataka':100,'waragka':1000}
def numbers_map_decimal(string):
string=word_tokenize(string)
n=len(string)
if n==1:
token=string[0]
if token in numbers_1_9.keys():
            return numbers_1_9[token]
if token in numbers_10_1000.keys():
            return numbers_10_1000[token]
elif n==2:
if string[1] in numbers_1_9:
return numbers_10_1000[string[0]]+numbers_1_9[string[1]]
if string[0] in numbers_1_9:
return numbers_1_9[string[0]]*numbers_10_1000[string[1]]
else:
s=0
if 'mari' in string:
if string[-1] in numbers_1_9:
s+=numbers_1_9[string[-1]]
mari_index=string.index('mari')
if string[mari_index-1] in numbers_1_9:
s=s+numbers_1_9[string[mari_index-1]]*10
else:
s=s+10
if 'pataka' in string:
pataka_index=string.index('pataka')
if string[pataka_index-1] in numbers_1_9:
s=s+numbers_1_9[string[pataka_index-1]]*100
else:
s=s+100
        if 'waragka' in string:
warangka_index=string.index('waragka')
if string[warangka_index-1] in numbers_1_9:
s=s+numbers_1_9[string[warangka_index-1]]*1000
else:
s=s+1000
return s
words_1_10={1:'kiñe',2:'epu',3:'kvla',4:'meli',5:'kecu',6:'kayu',7:'reqle',8:'pura',9:'ayja',10:'mari'}
def decimal_to_map_99(number):
components=[int(i) for i in str(number)]
if number<=10:
return words_1_10[number]
if number>10 and number<20:
return 'mari'+' '+words_1_10[components[1]]
if number>=20 and number<=99:
if components[1]==0:
return words_1_10[components[0]]+' '+'mari'
else:
return words_1_10[components[0]]+' '+'mari'+' '+words_1_10[components[1]]
def decimal_to_map_999(number):
hundred=int(str(number)[0])
if number<100:
return decimal_to_map_99(number)
elif number==100:
return 'pataka'
elif number%100==0 and number>100 and number<1000:
return words_1_10[hundred]+' '+'pataka'
else:
if hundred==1:
return 'pataka'+' '+decimal_to_map_99(int(str(number)[1:]))
else:
return words_1_10[hundred]+' '+'pataka'+' '+decimal_to_map_99(int(str(number)[1:]))
def decimal_to_map_9999(number):
thousand=int(str(number)[0])
if number<1000:
return decimal_to_map_999(number)
elif number==1000:
return 'waragka'
elif number%1000==0 and number>1000 and number<10000:
        return words_1_10[thousand]+' '+'waragka'
else:
if thousand==1:
return 'waragka'+' '+decimal_to_map_999(int(str(number)[1:]))
else:
return words_1_10[thousand]+' '+'waragka'+' '+decimal_to_map_999(int(str(number)[1:]))
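# Worked example (added for illustration; not part of the original app): walking
# 2519 through the helpers above gives
#     decimal_to_map_9999(2519) == 'epu waragka kecu pataka mari ayja'
# i.e. 2*1000 ('epu waragka') + 5*100 ('kecu pataka') + 19 ('mari ayja').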
words_1_10_esp={1:'uno',2:'dos',3:'tres',4:'cuatro',5:'cinco',6:'seis',7:'siete',8:'ocho',9:'nueve',10:'diez'}
words_11_20_esp={11:'once',12:'doce',13:'trece',14:'catorce',15:'quince',16:'dieciseis',17:'diecisiete',18:'dieciocho',
19:'diecinueve',20:'veinte'}
words_20_90_esp={2:'veinti',3:'treinta',4:'cuarenta',5:'cincuenta',6:'sesenta',7:'setenta',8:'ochenta',
9:'noventa'}
def decimal_to_map_99_esp(number):
components=[int(i) for i in str(number)]
if number<=10:
return words_1_10_esp[number]
if number>10 and number<20:
return words_11_20_esp[number]
if number>=20 and number<=99:
if number==20:
return words_11_20_esp[number]
if components[1]==0:
return words_20_90_esp[components[0]]
if components[1]!=0 and number<30:
return words_20_90_esp[components[0]]+words_1_10_esp[components[1]]
if components[1]!=0 and number>=30:
return words_20_90_esp[components[0]]+' '+'y'+' '+words_1_10_esp[components[1]]
def decimal_to_map_999_esp(number):
hundred=int(str(number)[0])
if number<100:
return decimal_to_map_99_esp(number)
elif number==100:
return 'cien'
elif number%100==0 and number>100 and number<1000:
if hundred==5:
return 'quinientos'
if hundred==7:
return 'setecientos'
if hundred==9:
return 'novecientos'
else:
return words_1_10_esp[hundred]+'cientos'
else:
if hundred==1:
return 'ciento'+' '+decimal_to_map_99_esp(int(str(number)[1:]))
else:
if hundred==5:
return 'quinientos'+' '+decimal_to_map_99_esp(int(str(number)[1:]))
if hundred==7:
return 'setecientos'+' '+decimal_to_map_99_esp(int(str(number)[1:]))
if hundred==9:
return 'novecientos'+' '+decimal_to_map_99_esp(int(str(number)[1:]))
else:
return words_1_10_esp[hundred]+'cientos'+' '+decimal_to_map_99_esp(int(str(number)[1:]))
def decimal_to_map_9999_esp(number):
thousand=int(str(number)[0])
if number<1000:
return decimal_to_map_999_esp(number)
elif number==1000:
return 'mil'
elif number%1000==0 and number>1000 and number<10000:
return words_1_10_esp[thousand]+' '+'mil'
else:
if thousand==1:
return 'mil'+' '+decimal_to_map_999_esp(int(str(number)[1:]))
else:
return words_1_10_esp[thousand]+' '+'mil'+' '+decimal_to_map_999_esp(int(str(number)[1:]))
def map_esp(number):
return decimal_to_map_9999(number)#+' | '+decimal_to_map_9999_esp(number)
@app.callback(
Output(component_id='my-div', component_property='children'),
[Input(component_id='my-id', component_property='value')]
)
def update_output_div(input_value):
try:
input_value=int(input_value)
except ValueError:
return 'Solo traduzco números :)'
if input_value < 1 or input_value > 9999:
return 'Aún no podemos traducir números en ese rango :('
else:
#return 'En mapuzugun, el número "{}" se dice'.format(input_value)+' "'+map_esp(input_value)+'"'
return (html.P(['En mapuzugun, el número "{}" se dice'.format(input_value),html.Br(),html.Strong(map_esp(input_value), style={'color': '#8B008B', 'fontSize': 20})]))
if __name__ == '__main__':
app.run_server(debug=True)
|
StarcoderdataPython
|
1728600
|
from days import AOCDay, day
import math
from collections import defaultdict
def borders(data):
top = data[0]
right = ''.join(line[-1] for line in data)
bottom = data[-1]
left = ''.join(line[0] for line in data)
return (top, right, bottom, left)
def mirrors(data):
mirrors = [data]
mirrors.append(data[::-1])
mirrors.append([row[::-1] for row in data])
mirrors.append([row[::-1] for row in data][::-1])
return mirrors
def rotations(data):
rotations = [data]
cur = data
for _ in range(3):
data = [line[:] for line in data]
for x in range(len(data)):
for y in range(len(data[x])):
data[x][y] = cur[len(data[x]) - y - 1][x]
cur = data
rotations.append(data)
return rotations
def all_options(data):
options = []
for mirror in mirrors(data):
options.extend(rotations(mirror))
# Remove duplicates
result = []
for option in options:
if option not in result:
result.append(option)
return result
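# Note (added for illustration): for a tile with no symmetry, mirrors() and
# rotations() together generate the 8 orientations of the dihedral group D4,
# e.g. len(all_options([list("ab"), list("cd")])) == 8 after deduplication.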
@day(20)
class Day20(AOCDay):
print_debug = "c12"
test_input = """Tile 2311:
..##.#..#.
##..#.....
#...##..#.
####.#...#
##.##.###.
##...#.###
.#.#.#..##
..#....#..
###...#.#.
..###..###
Tile 1951:
#.##...##.
#.####...#
.....#..##
#...######
.##.#....#
.###.#####
###.##.##.
.###....#.
..#.#..#.#
#...##.#..
Tile 1171:
####...##.
#..##.#..#
##.#..#.#.
.###.####.
..###.####
.##....##.
.#...####.
#.##.####.
####..#...
.....##...
Tile 1427:
###.##.#..
.#..#.##..
.#.##.#..#
#.#.#.##.#
....#...##
...##..##.
...#.#####
.#.####.#.
..#..###.#
..##.#..#.
Tile 1489:
##.#.#....
..##...#..
.##..##...
..#...#...
#####...#.
#..#.#.#.#
...#.#.#..
##.#...##.
..##.##.##
###.##.#..
Tile 2473:
#....####.
#..#.##...
#.##..#...
######.#.#
.#...#.#.#
.#########
.###.#..#.
########.#
##...##.#.
..###.#.#.
Tile 2971:
..#.#....#
#...###...
#.#.###...
##.##..#..
.#####..##
.#..####.#
#..#.#..#.
..####.###
..#.#.###.
...#.#.#.#
Tile 2729:
...#.#.#.#
####.#....
..#.#.....
....#..#.#
.##..##.#.
.#.####...
####.#.#..
##.####...
##..#.##..
#.##...##.
Tile 3079:
#.#.#####.
.#..######
..#.......
######....
####.#..#.
.#...#.##.
#.#####.##
..#.###...
..#.......
..#.###...""".split("\n")
MONSTER_PATTERN = ''' #
# ## ## ###
# # # # # # '''.split("\n")
tiles = {}
grid = {}
def common(self, input_data):
input_data = "\n".join(input_data).split("\n\n")
self.tiles = {}
self.grid = {}
for tile in input_data:
tile = tile.split("\n")
tile_id = int(tile[0].split(" ")[1].replace(":", ""))
self.tiles[tile_id] = [list(l) for l in tile[1:]]
def generate_tiling(self, tile_map):
puzzle_size = math.isqrt(len(tile_map))
tiles = [[None for _ in range(puzzle_size)] for _ in range(puzzle_size)]
def generate_tiling_recurse(tiles, x, y, seen):
if y == puzzle_size:
return tiles
next_x, next_y = x + 1, y
if next_x == puzzle_size:
next_x, next_y = 0, next_y + 1
for tile_id, options in tile_map.items():
if tile_id in seen:
continue
seen.add(tile_id)
for index, borders in options.items():
top, left = borders[0], borders[3]
if x > 0:
neighbour_id, neighbour_orientation = tiles[x - 1][y]
neighbour_right = tile_map[neighbour_id][neighbour_orientation][1]
if neighbour_right != left:
continue
if y > 0:
neighbour_id, neighbour_orientation = tiles[x][y - 1]
neighbour_bottom = tile_map[neighbour_id][neighbour_orientation][2]
if neighbour_bottom != top:
continue
tiles[x][y] = (tile_id, index)
answer = generate_tiling_recurse(tiles, next_x, next_y, seen)
if answer is not None:
return answer
seen.remove(tile_id)
tiles[x][y] = None
return None
return generate_tiling_recurse(tiles, 0, 0, set())
def part1(self, input_data):
tile_options = {tile_id: all_options(tile) for tile_id, tile in self.tiles.items()}
tile_map = defaultdict(dict)
for tile_id, options in tile_options.items():
for index, tile in enumerate(options):
tile_map[tile_id][index] = borders(tile)
tiling = self.generate_tiling(tile_map)
corners = [tiling[0][0], tiling[0][-1], tiling[-1][0], tiling[-1][-1]]
answer = 1
for c, _ in corners:
answer *= c
yield answer
def make_image(self, tile_options, tiling):
result = []
for row in tiling:
grids = []
for tile_id, orientation in row:
grid = tile_options[tile_id][orientation]
# Remove borders
grid = [line[1:-1] for line in grid[1: -1]]
grids.append(grid)
for y in range(len(grids[0][0])):
res_row = []
for i in range(len(grids)):
res_row.extend(grids[i][x][y] for x in range(len(grids[i])))
result.append("".join(res_row))
return result
def find_monsters(self, image):
monster_locs = []
max_x, max_y = 0, 0
for dy, line in enumerate(self.MONSTER_PATTERN):
for dx, c in enumerate(line):
if c == "#":
monster_locs.append((dx, dy))
max_x, max_y = max(dx, max_x), max((dy, max_y))
monster_tiles = set()
for y in range(len(image)):
if y + max_y >= len(image):
break
for x in range(len(image[y])):
if x + max_x >= len(image[y]):
break
has_monster = True
for dx, dy in monster_locs:
if image[y + dy][x + dx] != "#":
has_monster = False
break
if has_monster:
for dx, dy in monster_locs:
monster_tiles.add((x + dx, y + dy))
if len(monster_tiles) == 0:
return None
all_squares = set()
for y, line in enumerate(image):
for x, c in enumerate(line):
if c == "#":
all_squares.add((x, y))
return len(all_squares - monster_tiles)
def part2(self, input_data):
tile_options = {tile_id: all_options(tile) for tile_id, tile in self.tiles.items()}
tile_map = defaultdict(dict)
for tile_id, options in tile_options.items():
for index, tile in enumerate(options):
tile_map[tile_id][index] = borders(tile)
tiling = self.generate_tiling(tile_map)
image = self.make_image(tile_options, tiling)
image_options = all_options([list(line) for line in image])
answer = None
for opt in image_options:
answer = self.find_monsters(opt)
if answer is not None:
break
yield answer
|
StarcoderdataPython
|
1773694
|
<filename>chronostar/component.py
"""
Class object that encapsulates a component, the phase-space model
of an unbound set of stars formed from the same starburst/filament.
A component models the initial phase-space distribution of stars
as a Gaussian. As such there are three key attributes:
- mean: the central location
- covariance matrix: the spread in each dimension along with any correlations
- age: how long the stars have been travelling
TODO: Have actual names for parameters for clarity when logging results
"""
from __future__ import print_function, division, unicode_literals
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.stats.mstats import gmean
from . import transform
from chronostar.traceorbit import trace_cartesian_orbit
from . import traceorbit
class AbstractComponent(object):
"""
    An abstract class that (when implemented) encapsulates a component,
the phase-space model of an unbound set of stars formed from the
same starburst/filament.
A component models the initial phase-space distribution of stars
as a Gaussian. As such there are three key attributes:
- mean: the central location
- covariance matrix: the spread in each dimension along with any
correlations
- age: how long the stars have been travelling
This class has been left abstract so as to easily facilitate
(and encourage) alternate parameterisations of components.
In brief, just copy-paste SphereComponent below to make your new
class, and modify the methods and attribute to suit your new
parametrisation.
    In order to implement this class and make a concrete class, only
    one variable must be set, and four methods implemented. In short,
    the class must be told how to turn raw parameters into attributes,
    and vice versa.
Attributes to set:
`PARAMETER_FORMAT`
This parameter must be set. An ordered list of labels
describing what purpose each input serves. e.g. for a
SphereComponent, the list is
3*['pos'] + 3*['vel'] + ['log_pos_std', 'log_vel_std', 'age']
See `SENSIBLE_WALKER_SPREADS` for a set of viable labels, and
include your own as needed! Note that this is the parameters in
"internal form", i.e. the form of the parameter space that
emcee explores.
Methods to define
internalise(pars) and externalise(pars)
You must tell the Component class how to switch between internal
and external formats. These methods are static because there is
not always a need to instantiate an entire Component object
simply to convert between parameter forms.
        There is perhaps scope to have the Component class intuit
how to convert between forms based on `PARAMETER_FORMAT` values.
_set_covmatrix(covmatrix=None), (REQUIRED)
_set_mean(mean=None), _set_age(age=None) (both optional)
These methods instruct the class how to (if input is None) build
the attribute from self.pars, or (if input is provided) to set
        the self._mean attribute (for example) but also to reverse engineer
the self.pars values and update accordingly.
        These methods should only be called internally (from the
        __init__() method or the update_attribute() method), as it is
critical to do some tidying up (setting mean_now and
covmatrix_now to None) whenever self.pars is modified.
If you stick to the convention of the mean=pars[:6] and
age=pars[-1] then the default methods will suffice and you will
only need to implement _set_covmatrix(). Of course if you wish,
you can override _set_mean() or _set_age().
"""
__metaclass__ = ABCMeta
DEFAULT_TINY_AGE = 1e-10
_pars = None
_mean = None
_covmatrix = None
_age = None
_sphere_dx = None
_sphere_dv = None
_mean_now = None
_covmatrix_now = None
# Set these in concrete class, matching form with 'SENSIBLE_WALKER_SPREADS'
# See SphereComponent and EllipComponent for examples
PARAMETER_FORMAT = None
# This is used to guide the scale of variations in each parameter
# Super useful when initialising emcee walkers across a sensible
# volume of parameter space.
SENSIBLE_WALKER_SPREADS = {
'pos':10.,
'pos_std':1.,
'log_pos_std':0.5,
'vel':2.,
'vel_std':1.,
'log_vel_std':0.5,
'corr':0.05,
'age':1.,
'angle_rad':0.25*np.pi,
'angle_deg':45.,
}
def __init__(self, pars=None, attributes=None, internal=False,
trace_orbit_func=None):
"""
An abstraction for the parametrisation of a moving group
component origin. As a 6D Gaussian, a Component has three key
attributes; mean, covariance matrix, and age. There are many
ways to parameterise a covariance matrix to various degrees
of freedom.
Parameters
----------
pars: 1D float array_like
Raw values for the parameters of the component. Can be
provided in "external" form (standard) or "internal" form
(e.g. treating standard deviations in log space to ensure
uninformed prior)
attributes: dict with all the following keys:
mean: [6] float array_like
The mean of the initial Gaussian distribution in
cartesian space:
[X(pc), Y(pc), Z(pc), U(km/s), V(km/s), W(km/s)]
covmatrix: [6,6] float array_like
the covariance matrix of the initial Gaussian
distribution, with same units as `mean`
age: float
the age of the component (positive) in millions of
years
internal: boolean {False}
If set, and if `pars` is provided, treats input pars as
internal form, and first externalises them before building
attributes.
trace_orbit_func: function {traceOrbitXYZUVW}
Function used to calculate an orbit through cartesian space
(centred on, and co-rotating with, the local standard of
rest). Function must be able to take two parameters, the
starting location and the age, with positive age
            corresponding to forward evolution, and negative age
backward evolution. It should also be "odd", i.e.:
func(loc_then, +age) = loc_now
func(loc_now, -age) = loc_then
Returns
-------
res: Component object
            An abstraction of a set of values parametrising the origin of
a moving group component.
"""
# Some basic implementation checks
self.check_parameter_format()
# Set cartesian orbit tracing function
if trace_orbit_func is None:
self.trace_orbit_func = trace_cartesian_orbit
else:
self.trace_orbit_func = trace_orbit_func
# If parameters are provided in internal form (the form used by emcee),
# then externalise before setting of various other attributes.
if pars is not None:
if internal:
self._pars = self.externalise(pars)
else:
self._pars = np.copy(pars)
else:
self._pars = np.zeros(len(self.PARAMETER_FORMAT))
# Age *must* be non-zero
self._set_age(self.DEFAULT_TINY_AGE)
# Using provided parameters, set up the three model attributes:
# mean, covariance and age. If attributes are provided, then use
# those.
if attributes is None:
attributes = {}
self._set_mean(attributes.get('mean', None))
self._set_covmatrix(attributes.get('covmatrix', None))
self._set_age(attributes.get('age', None))
# For some purposes (e.g. virialisation estimation) it is useful to
# approximate position and velocity volumes as spherical. Calculate
# and set those attributes.
self.set_sphere_stds()
def __str__(self):
x,y,z,u,v,w = self.get_mean_now()
return 'Currentday(' \
'X: {:.2}pc, Y: {:.2}pc, Z: {:.2}pc, ' \
'U {:.2}km/s, V {:.2}km/s, W {:.2}km/s, ' \
'age: {:.2}Myr)'.format(x,y,z,u,v,w, self._age)
def __repr__(self):
return self.__str__()
@classmethod
def check_parameter_format(cls):
"""
A check for valid implementation. If this throws an error then
the PARAMETER_FORMAT attribute has been incorrectly defined.
"""
if cls.PARAMETER_FORMAT is None:
raise NotImplementedError('Need to define PARAMETER_FORMAT '
'as a class parameter')
if not np.all(np.isin(cls.PARAMETER_FORMAT,
list(cls.SENSIBLE_WALKER_SPREADS.keys()))):
raise NotImplementedError('Label in PARAMETER_FORMAT doesn\'t '
'seem to be in SENSIBLE_WALKER_SPREADS. '
'Extend dictionary in AbstractComponent '
'accordingly: {}'.format(
cls.PARAMETER_FORMAT
))
@staticmethod
def externalise(pars):
"""
Take parameter set in internal form (as used by emcee) and
convert to external form (as used to build attributes).
Tasks
-----
There is scope to implement this here, and use cls.PARAMETER_FORMAT
to guide the parameter conversions
"""
raise NotImplementedError
@staticmethod
def internalise(pars):
"""
Take parameter set in external form (as used to build attributes)
and convert to internal form (as used by emcee).
Tasks
-----
There is scope to implement this here, and use cls.PARAMETER_FORMAT
to guide the parameter conversions
"""
raise NotImplementedError
def get_pars(self):
"""
Return a copy of the raw (external) parameterisation of
the Component
"""
return np.copy(self._pars)
def _set_mean(self, mean=None):
"""
Builds mean from self.pars. If setting from an externally
provided mean then updates self.pars for consistency
If implementation does use the first 6 values in self._pars
to set the mean then this method should be overridden.
"""
# If mean hasn't been provided, generate from self._pars
# and set.
if mean is None:
self._mean = self._pars[:6]
# If mean has been provided, reverse engineer and update
# self._pars accordingly.
else:
self._mean = np.copy(mean)
self._pars[:6] = self._mean
def get_mean(self):
"""Return a copy of the mean (initial) of the component"""
return np.copy(self._mean)
@abstractmethod
def _set_covmatrix(self, covmatrix=None):
"""
Builds covmatrix from self._pars. If setting from an externally
provided covmatrix then update self._pars for consistency.
        This is the sole method that needs implementation to build a
usable Component class
"""
pass
def get_covmatrix(self):
"""Return a copy of the covariance matrix (initial)"""
return np.copy(self._covmatrix)
def _set_age(self, age=None):
"""Builds age from self.pars. If setting from an externally
provided age then updates self.pars for consistency"""
if age is None:
self._age = self._pars[-1]
else:
self._age = age
self._pars[-1] = age
def get_age(self):
"""Returns the age of the Component"""
return self._age
def get_attributes(self):
"""
Get a dictionary of all three key attributes of the Component
        model. Done this way for ease of initialising a new Component.
"""
return {'mean':self.get_mean(),
'covmatrix':self.get_covmatrix(),
'age':self.get_age()}
def set_sphere_stds(self):
"""
Set the spherical standard deviations in position space and
        velocity space. Calculated in such a way as to preserve
        volume in position space and velocity space respectively.
Note that combined phase-space volume is not conserved by this
implementation.
"""
self._sphere_dx = gmean(np.sqrt(
np.linalg.eigvalsh(self._covmatrix[:3, :3]))
)
self._sphere_dv = gmean(np.sqrt(
np.linalg.eigvalsh(self._covmatrix[3:, 3:]))
)
def get_sphere_dx(self):
"""
Return the spherical standard deviation in position space.
        First check if it is None (which may be the case if, e.g., the
        covmatrix has been updated) and recalculate as needed.
"""
if self._sphere_dx is None:
self.set_sphere_stds()
return self._sphere_dx
def get_sphere_dv(self):
"""
Return the spherical standard deviation in velocity space.
        First check if it is None (which may be the case if, e.g., the
        covmatrix has been updated) and recalculate as needed.
"""
if self._sphere_dv is None:
self.set_sphere_stds()
return self._sphere_dv
def update_attribute(self, attributes=None):
"""
Update attributes based on input dictionary.
Parameters
----------
attributes: dict
            A dictionary with any combination (including none) of the
following:
'mean': [6] float array_like
the mean of the initial 6D Gaussian
'covmatrix': [6,6] float array_like
the covariance matrix of the initial 6D Gaussian
'age': float
the age of the component
Notes
-----
A potential source of subtle bugs is that one can modify attributes
(e.g. mean) but if `covmatrix_now` has already been calculated, it
won't update. So it is critical to use only this method to modify
attributes such that we can force the recalculation of current-day
projections as required.
"""
if type(attributes) is not dict:
raise TypeError('Attributes must be passed in as dictionary')
if 'mean' in attributes.keys():
self._set_mean(mean=attributes['mean'])
if 'covmatrix' in attributes.keys():
self._set_covmatrix(covmatrix=attributes['covmatrix'])
if 'age' in attributes.keys():
self._set_age(age=attributes['age'])
self._mean_now = None
self._covmatrix_now = None
self._sphere_dx = None
self._sphere_dv = None
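    # Hypothetical usage (added for illustration): updating the age through
    # update_attribute() also clears the cached current-day projection, e.g.
    #     comp.update_attribute({'age': 25.})
    #     comp.get_currentday_projection()   # recomputed with the new age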
def get_mean_now(self):
"""
Calculates the mean of the component when projected to the current-day
"""
if self._mean_now is None:
self._mean_now =\
self.trace_orbit_func(self._mean, times=self._age)
return self._mean_now
def get_covmatrix_now(self):
"""
Calculates covariance matrix of current day distribution.
Calculated as a first-order Taylor approximation of the coordinate
transformation that takes the initial mean to the current day mean.
This is the most expensive aspect of Chronostar, so we first make
sure the covariance matrix hasn't already been projected.
"""
if self._covmatrix_now is None:
self._covmatrix_now = transform.transform_covmatrix(
self._covmatrix, trans_func=self.trace_orbit_func,
loc=self._mean, args=(self._age,),
)
return self._covmatrix_now
def get_currentday_projection(self):
"""
Calculate (as needed) and return the current day projection of Component
Returns
-------
mean_now : [6] float array_like
The phase-space centroid of current-day Gaussian distribution of
Component
covmatrix_now : [6,6] float array_like
The phase-space covariance matrix of current-day Gaussian
distribution of Component
"""
return self.get_mean_now(), self.get_covmatrix_now()
def splitGroup(self, lo_age, hi_age):
"""
Generate two new components that share the current day mean, and
initial covariance matrix of this component but with different ages:
`lo_age` and `hi_age`.
Parameters
----------
lo_age : float
Must be a positive (and ideally smaller) value than self.age.
Serves as the age for the younger component.
hi_age : float
Must be a positive (and ideally larger) value than self.age
Serves as the age for the older component.
Returns
-------
lo_comp : Component
A component that matches `self` in current-day mean and initial
covariance matrix but with a younger age
hi_comp : Component
A component that matches `self` in current-day mean and initial
covariance matrix but with an older age
"""
comps = []
for new_age in [lo_age, hi_age]:
            # Give new component identical initial covmatrix, and an initial
# mean chosen to yield identical mean_now
new_mean = self.trace_orbit_func(self.get_mean_now(),
times=-new_age)
new_comp = self.__class__(attributes={'mean':new_mean,
'covmatrix':self._covmatrix,
'age':new_age})
comps.append(new_comp)
return comps
def get_peak(self, amplitude=1.):
"""
Get the density at the peak of distribution.
Use this as a proxy of the characteristic density of the distribution,
with the option to scale by the amplitude of the Gaussian. Note, the
height of the peak is only dependent on the covariance matrix.
"""
        expon = 0 # because we are evaluating the distribution *at* the mean
det = np.linalg.det(self.get_covmatrix_now())
coeff = 1./np.sqrt( (2*np.pi)**6 * det)
return amplitude * coeff * np.exp(expon)
@staticmethod
def load_components(filename):
"""
Load Component objects from a *.npy file.
Used to standardise result if loading a single component vs multiple
components.
Parameters
----------
filename : str
name of the stored file
Returns
-------
res : [Component] list
A list of Component objects
"""
res = np.load(filename)
if res.shape == ():
return np.array([res.item()])
else:
return res
@classmethod
def get_sensible_walker_spread(cls):
"""Get an array of sensible walker spreads (based on class
        constants `PARAMETER_FORMAT` and `SENSIBLE_WALKER_SPREADS` to
guide emcee in a sensible starting range of parameters."""
sensible_spread = []
for par_form in cls.PARAMETER_FORMAT:
sensible_spread.append(cls.SENSIBLE_WALKER_SPREADS[par_form])
return np.array(sensible_spread)
class SphereComponent(AbstractComponent):
PARAMETER_FORMAT = ['pos', 'pos', 'pos', 'vel', 'vel', 'vel',
'log_pos_std', 'log_vel_std',
'age']
@staticmethod
def externalise(pars):
"""
Take parameter set in internal form (as used by emcee) and
convert to external form (as used to build attributes).
"""
extern_pars = np.copy(pars)
extern_pars[6:8] = np.exp(extern_pars[6:8])
return extern_pars
@staticmethod
def internalise(pars):
"""
Take parameter set in external form (as used to build attributes)
and convert to internal form (as used by emcee).
"""
intern_pars = np.copy(pars)
intern_pars[6:8] = np.log(intern_pars[6:8])
return intern_pars
def _set_covmatrix(self, covmatrix=None):
"""Builds covmatrix from self.pars. If setting from an externally
provided covariance matrix then updates self.pars for consistency"""
# If covmatrix hasn't been provided, generate from self._pars
# and set.
if covmatrix is None:
dx = self._pars[6]
dv = self._pars[7]
self._covmatrix = np.identity(6)
self._covmatrix[:3, :3] *= dx ** 2
self._covmatrix[3:, 3:] *= dv ** 2
# If covmatrix has been provided, reverse engineer the most
# suitable set of parameters and update self._pars accordingly
# (e.g. take the geometric mean of the (square-rooted) velocity
# eigenvalues as dv, as this at least ensures constant volume
# in velocity space).
else:
self._covmatrix = np.copy(covmatrix)
dx = gmean(np.sqrt(
np.linalg.eigvalsh(self._covmatrix[:3, :3]))
)
dv = gmean(np.sqrt(
np.linalg.eigvalsh(self._covmatrix[3:, 3:]))
)
self._pars[6] = dx
self._pars[7] = dv
self.set_sphere_stds()
class EllipComponent(AbstractComponent):
PARAMETER_FORMAT = ['pos', 'pos', 'pos', 'vel', 'vel', 'vel',
'log_pos_std', 'log_pos_std', 'log_pos_std',
'log_vel_std',
'corr', 'corr', 'corr',
'age']
@staticmethod
def externalise(pars):
"""
Take parameter set in internal form (as used by emcee) and
convert to external form (as used to build attributes).
"""
extern_pars = np.copy(pars)
extern_pars[6:10] = np.exp(extern_pars[6:10])
return extern_pars
@staticmethod
def internalise(pars):
"""
Take parameter set in external form (as used to build attributes)
and convert to internal form (as used by emcee).
"""
intern_pars = np.copy(pars)
intern_pars[6:10] = np.log(intern_pars[6:10])
return intern_pars
def _set_covmatrix(self, covmatrix=None):
"""Builds covmatrix from self.pars. If setting from an externally
provided covariance matrix then updates self.pars for consistency"""
# If covmatrix hasn't been provided, generate from self._pars
# and set.
if covmatrix is None:
dx, dy, dz = self._pars[6:9]
dv = self._pars[9]
c_xy, c_xz, c_yz = self._pars[10:13]
self._covmatrix = np.array([
[dx**2, c_xy*dx*dy, c_xz*dx*dz, 0., 0., 0.],
[c_xy*dx*dy, dy**2, c_yz*dy*dz, 0., 0., 0.],
[c_xz*dx*dz, c_yz*dy*dz, dz**2, 0., 0., 0.],
[0., 0., 0., dv**2, 0., 0.],
[0., 0., 0., 0., dv**2, 0.],
[0., 0., 0., 0., 0., dv**2],
])
# If covmatrix has been provided, reverse engineer the most
# suitable set of parameters and update self._pars accordingly
# (e.g. take the geometric mean of the (square-rooted) velocity
# eigenvalues as dv, as this at least ensures constant volume
# in velocity space).
else:
self._covmatrix = np.copy(covmatrix)
pos_stds = np.sqrt(np.diagonal(self._covmatrix[:3, :3]))
dx, dy, dz = pos_stds
pos_corr_matrix = (self._covmatrix[:3, :3]
/ pos_stds
/ pos_stds.reshape(1,3).T)
c_xy, c_xz, c_yz = pos_corr_matrix[np.triu_indices(3,1)]
dv = gmean(np.sqrt(
np.linalg.eigvalsh(self._covmatrix[3:, 3:]))
)
self._pars[6:9] = dx, dy, dz
self._pars[9] = dv
self._pars[10:13] = c_xy, c_xz, c_yz
|
StarcoderdataPython
|
3589645
|
<gh_stars>1-10
#!/usr/bin/python
# Finds vulnerabilites in manifest files
import sys
from xml.dom.minidom import Element
from androguard.core.bytecodes import apk
from androguard.core.bytecodes import dvm
import permissions
# Component Types Enum
ACTIVITY = 0
SERVICE = 1
RECEIVER = 2
PROVIDER = 3
tag2type = { "activity":ACTIVITY,
"activity-alias":ACTIVITY,
"service":SERVICE,
"receiver":RECEIVER,
"provider":PROVIDER}
type2tag = { ACTIVITY:"activity",
SERVICE:"service",
RECEIVER:"receiver",
PROVIDER:"provider"}
type2methods = { ACTIVITY: ["onCreate"],
SERVICE: ["onStartCommand", "onBind"],
RECEIVER: ["onReceive"],
PROVIDER: []}
class Component:
def __init__(self, element, perms, perm=None):
self.element = element
self.type = tag2type[element.tagName]
if self.element.tagName == "activity-alias":
self.name = self.element.getAttribute("android:targetActivity")
else:
self.name = self.element.getAttribute("android:name")
self.path = '/'.join(self.name.split('.'))+";"
self.perm_level = None
self.perm = None
if self.element.hasAttribute("android:permission"):
self.perm = self.element.getAttribute("android:permission")
elif perm:
self.perm = perm
if self.perm:
perm_key = self.perm.split(".")[-1]
if perms.has_key(perm_key):
self.perm_level = perms[perm_key]
else:
print "unknown perm %s(%s)" % (perm_key, self.perm)
self.perm_level = permissions.SIGSYS
def __repr__(self):
return "<"+type2tag[self.type] + " " + self.name + ">"
def is_public(self):
exported_set = self.element.hasAttribute("android:exported")
exported = False
if exported_set:
exported = self.element.getAttribute("android:exported") == "true"
has_filter = False
if self.element.hasChildNodes():
for child in [c for c in self.element.childNodes if isinstance(c,Element)]:
has_filter = has_filter or child.tagName == "intent-filter"
# See http://developer.android.com/guide/topics/manifest/service-element.html#exported
if has_filter:
if exported_set: return exported
else: return True
else:
if exported_set: return exported
else: return False
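    # Summary of the rule implemented above (descriptive note added here, based
    # on the linked Android documentation): with an <intent-filter> the
    # component is exported by default unless android:exported="false";
    # without one it is private unless android:exported="true".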
def is_exploitable(self):
if self.perm:
return self.is_public() and self.perm_level<=permissions.DANG
else:
return self.is_public()
def cleanup_attributes(a, element):
if isinstance(element,Element):
for tag in ["android:name", "android:targetActivity"]:
if element.hasAttribute(tag):
name_attr = element.getAttributeNode(tag)
name_attr.value = a.format_value(name_attr.value)
if element.hasChildNodes():
for e in element.childNodes:
cleanup_attributes(a, e)
def extract_perms(manifest):
new_perms = {}
for p in manifest.getElementsByTagName("permission"):
perm = p.getAttribute("android:name")
level = permissions.NORMAL
if p.hasAttribute("android:protectionLevel"):
attr_level = p.getAttribute("android:protectionLevel")
try:
l = str(eval(attr_level))
attr_level = l
except Exception:
pass
level = permissions.text2perm[attr_level]
new_perms[perm.split(".")[-1]] = level
return new_perms
def get_exploitable_methods(a, d, perms):
xml = a.get_AndroidManifest()
cleanup_attributes(a,xml.documentElement)
perms.update(extract_perms(xml))
app = xml.getElementsByTagName("application")[0]
app_perm = None
if app.hasAttribute("android:permission"):
        app_perm = app.getAttribute("android:permission")
components = []
for comp_name in tag2type.keys():
for item in xml.getElementsByTagName(comp_name):
comp = Component(item, perms, app_perm)
if comp.is_exploitable():
components.append(comp)
#print components
classes = d.get_classes()
# Possible way of finding programmatically created receivers?
#[p.get_src(d.CM) for p in dx.get_tainted_packages().search_methods("content/Context","registerReceiver",".")]
exploitable_methods = []
for comp in components:
c_objects = [k for k in classes if k.get_name().count(comp.path) > 0]
if len(c_objects) != 1:
print "oh no! Found %d classes for component %s" % (len(c_objects), comp.name)
continue
c_obj = c_objects[0]
# TODO: perhaps we need to look for methods in superclass? For example:
# BitCoin Wallet app has receiver
# de.schildbach.wallet.WalletBalanceWidgetProvider
# which subclasses android.appwidget.AppWidgetProvider, which is where
# the onReceive method is implemented...
method_objects = [m for m in c_obj.get_methods() if m.get_name() in type2methods[comp.type]]
exploitable_methods = exploitable_methods + [(comp,m) for m in method_objects]
#print [m[1].get_name() for m in exploitable_methods]
# Links to check out:
# http://developer.android.com/guide/topics/manifest/provider-element.html#gprmsn
# http://developer.android.com/guide/topics/manifest/data-element.html
# https://developer.android.com/guide/topics/security/permissions.html#enforcement
return exploitable_methods
if __name__ == "__main__" :
get_exploitable_methods(apk.APK(sys.argv[1]), dvm.DalvikVMFormat(a.get_dex()),permissions.permissions)
|
StarcoderdataPython
|
5197057
|
#!/usr/bin/env python3
"""
This script collects any vulnerabilities associated with the five C/C++ projects by scraping the CVE Details website.
This information includes the CVE identifier, publish date, CVSS score, various impacts, vulnerability types, the CWE ID, and
the URLs to other relevant websites like a project's Bugzilla or Security Advisory platforms.
For each project, this information is saved to a CSV file.
"""
import csv
import os
from modules.common import log
from modules.project import Project
####################################################################################################
project_list = Project.get_project_list_from_config()
Project.debug_ensure_all_project_repositories_were_loaded(project_list)
for project in project_list:
CSV_HEADER = [
'CVE', 'CVE URL',
'Publish Date', 'Last Update Date',
'CVSS Score', 'Confidentiality Impact', 'Integrity Impact',
'Availability Impact', 'Access Complexity', 'Authentication',
'Gained Access', 'Vulnerability Types', 'CWE',
'Affected Product Versions',
'Bugzilla URLs', 'Bugzilla IDs',
'Advisory URLs', 'Advisory IDs', 'Advisory Info',
'Git URLs', 'Git Commit Hashes',
'SVN URLs', 'SVN Revision Numbers'
]
project.create_output_subdirectory()
output_csv_path = project.get_base_output_csv_path('cve')
with open(output_csv_path, 'w', newline='') as csv_file:
csv_writer = csv.DictWriter(csv_file, fieldnames=CSV_HEADER)
csv_writer.writeheader()
for cve in project.scrape_vulnerabilities_from_cve_details():
cve.serialize_containers()
csv_row = {
'CVE': cve.id, 'CVE URL': cve.url,
'Publish Date': cve.publish_date, 'Last Update Date': cve.last_update_date,
'CVSS Score': cve.cvss_score, 'Confidentiality Impact': cve.confidentiality_impact, 'Integrity Impact': cve.integrity_impact,
'Availability Impact': cve.availability_impact, 'Access Complexity': cve.access_complexity, 'Authentication': cve.authentication,
'Gained Access': cve.gained_access, 'Vulnerability Types': cve.vulnerability_types, 'CWE': cve.cwe,
'Affected Product Versions': cve.affected_products,
'Bugzilla URLs': cve.bugzilla_urls, 'Bugzilla IDs': cve.bugzilla_ids,
'Advisory URLs': cve.advisory_urls, 'Advisory IDs': cve.advisory_ids, 'Advisory Info': cve.advisory_info,
'Git URLs': cve.git_urls, 'Git Commit Hashes': cve.git_commit_hashes,
'SVN URLs': cve.svn_urls, 'SVN Revision Numbers': cve.svn_revision_numbers
}
csv_writer.writerow(csv_row)
log.info(f'Finished running for the project "{project}".')
log.info('Finished running.')
print('Finished running.')
|
StarcoderdataPython
|
1788802
|
#!/usr/bin/env python
#
# Copyright (c) 2015, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#pylint: disable=R0904,F0401
import unittest
from client_test_lib import Bootstrap
from client_test_lib import raise_exception
class XmppConfigTest(unittest.TestCase):
@classmethod
def bootstrap(cls, xmpp):
bootstrap = Bootstrap()
bootstrap.ztps.set_config_response(xmpp=xmpp)
bootstrap.ztps.set_node_check_response()
bootstrap.ztps.set_definition_response()
bootstrap.start_test()
return bootstrap
def xmpp_sanity_test(self, xmpp):
bootstrap = self.bootstrap(xmpp)
try:
self.failUnless(bootstrap.eapi_node_information_collected())
self.failUnless(bootstrap.missing_startup_config_failure())
self.failIf(bootstrap.error)
self.failIf('XmppClient' not in bootstrap.output)
except AssertionError as assertion:
print 'Output: %s' % bootstrap.output
print 'Error: %s' % bootstrap.error
raise_exception(assertion)
finally:
bootstrap.end_test()
def test_full(self):
self.xmpp_sanity_test({'server' : 'test-server',
'port' : 112233,
'username' : 'test-username',
'password' : '<PASSWORD>',
'domain' : 'test-domain',
'rooms' : ['test-room-1', 'test-room-2']})
def test_msg_type_debug(self):
self.xmpp_sanity_test({'server' : 'test-server',
'port' : 112233,
'username' : 'test-username',
'password' : '<PASSWORD>',
'domain' : 'test-domain',
'rooms' : ['test-room-1', 'test-room-2'],
'msg_type' : 'debug'})
def test_msg_type_info(self):
self.xmpp_sanity_test({'server' : 'test-server',
'port' : 112233,
'username' : 'test-username',
'password' : '<PASSWORD>',
'domain' : 'test-domain',
'rooms' : ['test-room-1', 'test-room-2'],
                               'msg_type' : 'info'})
def test_partial(self):
self.xmpp_sanity_test({'rooms' : ['test-room-1'],
'username' : 'test-username',
'password' : '<PASSWORD>',
'domain' : 'test-domain'})
def test_erroneous_msg_type(self):
bootstrap = self.bootstrap({'server' : 'test-server',
'port' : 112233,
'username' : 'test-username',
'password' : '<PASSWORD>',
'domain' : 'test-domain',
'rooms' : ['test-room-1', 'test-room-2'],
'msg_type' : 'bogus'})
try:
self.failUnless(bootstrap.eapi_node_information_collected())
self.failUnless(bootstrap.missing_startup_config_failure())
self.failIf(bootstrap.error)
self.failIf('XMPP configuration failed because of '
'unexpected \'msg_type\''
not in bootstrap.output)
except AssertionError as assertion:
print 'Output: %s' % bootstrap.output
print 'Error: %s' % bootstrap.error
raise_exception(assertion)
finally:
bootstrap.end_test()
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
9786901
|
import numpy as np
import tensorflow as tf
from config import MovieQAPath
from legacy.input import Input
_mp = MovieQAPath()
hp = {'emb_dim': 300, 'feat_dim': 512,
'learning_rate': 10 ** (-4), 'decay_rate': 0.97, 'decay_type': 'exp', 'decay_epoch': 2,
'opt': 'adam', 'checkpoint': '', 'dropout_rate': 0.1, 'pos_len': 35}
def dropout(x, training):
return tf.layers.dropout(x, hp['dropout_rate'], training=training)
def make_mask(x, length):
return tf.tile(tf.expand_dims(tf.sequence_mask(x, maxlen=length),
axis=-1), [1, 1, hp['emb_dim']])
def sliding(x):
return tf.nn.pool(x, [3], 'AVG', 'SAME', data_format='NWC')
def seq_mean(x, l):
return tf.reduce_sum(x, axis=1) / tf.to_float(tf.expand_dims(l, axis=-1))
class Model(object):
def __init__(self, data, training=False):
self.data = data
self.initializer = tf.glorot_normal_initializer()
q_mask = make_mask(self.data.ql, 25) # (1, L_q, E)
s_mask = make_mask(self.data.sl, 29) # (N, L_s, E)
a_mask = make_mask(self.data.al, 34) # (5, L_a, E)
ques_shape = tf.shape(q_mask)
subt_shape = tf.shape(s_mask)
ans_shape = tf.shape(a_mask)
with tf.variable_scope('Embedding'):
self.embedding = tf.get_variable('embedding_matrix',
initializer=np.load(_mp.embedding_file), trainable=False)
self.ques = tf.nn.embedding_lookup(self.embedding, self.data.ques) # (1, L_q, E)
self.ans = tf.nn.embedding_lookup(self.embedding, self.data.ans) # (5, L_a, E)
self.subt = tf.nn.embedding_lookup(self.embedding, self.data.subt) # (N, L_s, E)
# self.ques = tf.layers.dropout(self.ques, hp['dropout_rate'], training=training) # (1, L_q, E)
# self.ans = tf.layers.dropout(self.ans, hp['dropout_rate'], training=training) # (5, L_a, E)
# self.subt = tf.layers.dropout(self.subt, hp['dropout_rate'], training=training) # (N, L_s, E)
with tf.variable_scope('Embedding_Linear'):
self.ques_embedding = self.embedding_linear(self.ques, q_mask, 'question') # (1, L_q, E_t)
self.ans_embedding = self.embedding_linear(self.ans, a_mask, 'answer') # (5, L_a, E_t)
self.subt_embedding = self.embedding_linear(self.subt, s_mask, 'subtitle') # (N, L_s, E_t)
with tf.variable_scope('Language_Attention'):
position_attn = tf.get_variable('position_attention', shape=[hp['pos_len'], hp['emb_dim']],
initializer=self.initializer, trainable=False)
ques_pos, _ = tf.split(position_attn, [25, hp['pos_len'] - 25])
ans_pos, _ = tf.split(position_attn, [34, hp['pos_len'] - 34])
subt_pos, _ = tf.split(position_attn, [29, hp['pos_len'] - 29])
self.one_word_encodes = [self.subt_embedding]
ques_enc = seq_mean(self.ques_embedding * ques_pos, self.data.ql) # (1, E_t)
for i in range(2):
with tf.variable_scope('OneWord_Attention_%d' % i):
subt_enc = seq_mean(self.one_word_encodes[-1] * subt_pos, self.data.sl) # (N, E_t)
subt_pre_attn = tf.nn.tanh(
self.dense_wo_everything(ques_enc) +
self.dense_wo_everything(subt_enc) +
self.dense_wo_everything(tf.reduce_mean(subt_enc, axis=0, keepdims=True))) # (N, E_t)
subt_pre_attn = tf.expand_dims(
tf.einsum('ijk,ik->ij', self.one_word_encodes[-1], subt_pre_attn),
axis=-1) # (N, L_s, 1)
subt_attn = tf.nn.softmax(subt_pre_attn, axis=1) # (N, L_s, 1)
self.one_word_encodes.append(self.one_word_encodes[-1] * (1 + subt_attn)) # (N, L_s, E_t)
self.one_word_mean = tf.concat([tf.expand_dims(t, axis=0)
for t in self.one_word_encodes],
axis=0) # (3, N, L_s, E_t)
self.one_word_weight = tf.reshape(
tf.nn.softmax(
tf.layers.dense(ques_enc, 3, kernel_initializer=self.initializer),
axis=-1),
[3, 1, 1, 1]) # (3, 1, 1, 1)
self.one_word_mean = tf.transpose(
tf.reduce_sum(
tf.reduce_sum(self.one_word_mean * self.one_word_weight, axis=0),
axis=1, keepdims=True)
/ tf.to_float(tf.reshape(self.data.sl, [-1, 1, 1])),
[1, 0, 2]) # (1, N, E_t)
self.pool_subt = sliding(self.subt_embedding * subt_pos) # (N, L_s, E_t)
self.tri_word_encodes = [self.pool_subt]
for i in range(2):
with tf.variable_scope('TriWord_Attention_%d' % i):
pool_subt_enc = seq_mean(self.tri_word_encodes[-1], self.data.sl) # (N, E_t)
pool_subt_pre_attn = tf.nn.tanh(
self.dense_wo_everything(ques_enc) +
self.dense_wo_everything(pool_subt_enc) +
self.dense_wo_everything(tf.reduce_mean(pool_subt_enc, axis=0, keepdims=True))) # (N, E_t)
pool_subt_pre_attn = tf.expand_dims(
tf.einsum('ijk,ik->ij', self.tri_word_encodes[-1], pool_subt_pre_attn),
axis=-1) # (N, L_s, 1)
pool_subt_attn = tf.nn.softmax(pool_subt_pre_attn, axis=1) # (N, L_s, 1)
self.tri_word_encodes.append(self.tri_word_encodes[-1] * (1 + pool_subt_attn)) # (N, L_s, E_t)
self.tri_word_mean = tf.concat([tf.expand_dims(t, axis=0)
for t in self.tri_word_encodes],
axis=0) # (3, N, L_s, E_t)
self.tri_word_weight = tf.reshape(
tf.nn.softmax(
tf.layers.dense(ques_enc, 3, kernel_initializer=self.initializer),
axis=-1),
[3, 1, 1, 1]) # (3, 1, 1, 1)
self.tri_word_mean = tf.transpose(
tf.reduce_sum(
tf.reduce_sum(self.tri_word_mean * self.tri_word_weight, axis=0),
axis=1, keepdims=True)
/ tf.to_float(tf.reshape(self.data.sl, [-1, 1, 1])),
[1, 0, 2]) # (1, N, E_t)
tile_ques_enc = tf.tile(tf.expand_dims(ques_enc, axis=0), [1, subt_shape[0], 1]) # (1, N, E_t)
self.one_concat = tf.concat([self.one_word_mean, tile_ques_enc], axis=-1) # (1, N, 2 * E_t)
self.tri_concat = tf.concat([self.tri_word_mean, tile_ques_enc], axis=-1) # (1, N, 2 * E_t)
with tf.variable_scope('Temporal_Attention'):
self.temp_one_attn = tf.nn.softmax(
tf.layers.conv1d(
tf.layers.conv1d(self.one_concat, hp['emb_dim'] * 2, 3, padding='same', activation=tf.nn.relu),
1, 5, padding='same', activation=None),
axis=1) # (1, N, 1)
self.temp_tri_attn = tf.nn.softmax(
tf.layers.conv1d(
tf.layers.conv1d(self.tri_concat, hp['emb_dim'] * 2, 3, padding='same', activation=tf.nn.relu),
1, 5, padding='same', activation=None
),
axis=1) # (1, N, 1)
self.temp_one = tf.reduce_sum(self.one_word_mean * self.temp_one_attn, axis=1) # (1, E_t)
self.temp_tri = tf.reduce_sum(self.tri_word_mean * self.temp_tri_attn, axis=1) # (1, E_t)
self.temp_weight = tf.transpose(tf.nn.softmax(
tf.layers.dense(ques_enc, 3, kernel_initializer=self.initializer), axis=-1)) # (3, 1)
self.ans_vec = tf.concat([self.temp_one, self.temp_tri, ques_enc], axis=0) * self.temp_weight
self.ans_vec = tf.tile(tf.reduce_sum(self.ans_vec, axis=0, keepdims=True), [5, 1])
ans_enc = seq_mean(self.ans_embedding * ans_pos, self.data.al)
self.output = tf.reduce_sum(self.ans_vec * ans_enc, axis=1)
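        # ans_vec was tiled to (5, E_t), so the elementwise product with the five
        # answer encodings and the reduce_sum above leave one unnormalized score
        # per candidate answer in self.output (shape (5,)).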
def embedding_linear(self, x, x_mask, scope):
with tf.variable_scope(scope):
x = tf.layers.dense(x, hp['emb_dim'] * 4, activation=tf.nn.relu, kernel_initializer=self.initializer)
x = tf.layers.dense(x, hp['emb_dim'], kernel_initializer=self.initializer)
zeros = tf.zeros_like(x)
x = tf.where(x_mask, x, zeros)
return x
def dense_wo_everything(self, x):
return tf.layers.dense(x, hp['emb_dim'], use_bias=False, kernel_initializer=self.initializer)
def main():
data = Input(split='train')
model = Model(data)
for v in tf.global_variables():
print(v)
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
# config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
with tf.Session(config=config) as sess:
sess.run([model.data.initializer, tf.global_variables_initializer()], )
# q, a, s = sess.run([model.ques_enc, model.ans_enc, model.subt_enc])
# print(q.shape, a.shape, s.shape)
# a, b, c, d = sess.run(model.tri_word_encodes)
# print(a, b, c, d)
# print(a.shape, b.shape, c.shape, d.shape)
a, b = sess.run([model.ans_vec, model.output])
print(a, b)
print(a.shape, b.shape)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1641636
|
<filename>gps/pasing/google_parser.py
import datetime
import json
import pickle
import time as t
from pathlib import Path
url = "/Users/rohit/Downloads/Takeout/Location_History/Location_History.json"
class Location:
latitude = None
longitude = None
date = None
milisec = None
accuracy = None
    def __init__(self, loc=None, date=None):
if loc is not None:
self.lat = loc['latitudeE7']
self.long = loc['longitudeE7']
self.accuracy = loc['accuracy']
self.milisec = int(loc["timestampMs"])
time = int(loc["timestampMs"]) / 1000
date = datetime.datetime.fromtimestamp(
int(time)
)
if self.lat is not None and self.long is not None and date is not None:
self.longitude = self.long
self.latitude = self.lat
self.date = date
else:
raise Exception("Invalid Data for creating Location object {} - {} - {}".format(self.lat, self.long, self.date))
return
elif date is not None:
            self.lat = 0
self.long = 0
self.accuracy = 0
self.milisec = int(t.mktime(date.timetuple()) * 1e3 + date.microsecond / 1e3)
self.date = date
else:
raise Exception("Invalid Data for creating Location object")
def __str__(self):
return str("Time : {} - Latitude : {} - Longitude : {}".format(self.milisec,self.latitude,self.longitude))
def __repr__(self):
return str("Time : {} - Latitude : {} - Longitude : {}".format(self.milisec, self.latitude, self.longitude))
class DataModel:
path = None
    def __init__(self, path=url, preloded=False):
if preloded is True:
return
if path is None or len(path) == 0:
raise Exception("Invalid File path, ensure the corrected path")
my_file = Path(path)
if my_file.exists() is False:
raise Exception("Invalid File path, {} check if it exist".format(path))
else:
self.path = path
def load_data_array(self):
with open(self.path) as data_file:
json_data = json.load(data_file)
data = []
json_data = json_data["locations"]
print(len(json_data))
for i in range(len(json_data)):
data.append(Location(json_data[i]))
print("Loading {}".format(i))
pickle.dump(data, open("locations.p", "wb"))
def load_data_map(self):
with open(self.path) as data_file:
json_data = json.load(data_file)
loc_map = {}
json_data = json_data["locations"]
print(len(json_data))
for i in range(len(json_data)):
loc = Location(json_data[i])
key = loc.date.strftime('%Y-%m-%d')
if key in loc_map:
loc_map[key].append(loc)
else:
loc_map[key] = [loc]
print("Found {} - {}".format(i, loc))
pickle.dump(loc_map, open("data/locations_map.p", "wb"))
def get_location_map(self):
with open("data/locations_map.p", "rb") as input_file:
return pickle.load(input_file)
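# Hedged usage sketch (not part of the original module): build the per-day
# pickle from a Takeout export once, then reload it. The path below is only an
# example, and load_data_map() assumes a local "data/" directory exists.
if __name__ == '__main__':
    model = DataModel("/path/to/Takeout/Location_History/Location_History.json")
    model.load_data_map()
    by_day = model.get_location_map()
    print("Loaded locations for {} days".format(len(by_day)))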
|
StarcoderdataPython
|
226300
|
# coding: utf-8
# Copyright (c) 2016, <NAME> (alexpirine), 2016
import itertools
import re
import sudokumaker
from django.contrib import messages
from django.shortcuts import render
from sudoku import SudokuProblem
from . import forms
# Create your views here.
def home(request):
action = request.POST.get('action', None)
suggest = request.GET.get('suggest', False)
matrix = request.POST.getlist('matrix')
solution = None
solved = False
initial_data = None
if suggest:
matrix = sudokumaker.make_problem()
initial_data = [{'value': v or None} for k, v in enumerate(itertools.chain.from_iterable(matrix))]
form = forms.SudokuForm(request.POST or None, initial=initial_data)
if action == 'solve' and form.is_valid():
user_data = [v['value'] or 0 for v in form.cleaned_data]
matrix = [user_data[i:i+9] for i in xrange(0, len(user_data), 9)]
problem = SudokuProblem(matrix)
solution = problem.solve()
if solution:
initial_data = [{'value': v} for k, v in enumerate(itertools.chain.from_iterable(solution.matrix))]
form = forms.SudokuForm(initial=initial_data)
for k, f in enumerate(form):
if not bool(user_data[k]):
f.fields['value'].widget.attrs['class'] += ' text-success'
else:
f.fields['value'].widget.attrs['class'] += ' font-bold'
solved = True
c = {
'action': action,
'form': form,
'solved': solved,
}
return render(request, 'home.html', c)
|
StarcoderdataPython
|
5051666
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 30 08:48:06 2021
@author: u0139894
"""
import numpy as np
import os
class Model:
def __init__(self, modelPath, initial):
self.modelPath = modelPath
self.initial = initial
self.__getMets()
self.__getReacs()
def __getMets(self):
mets = {}
with open(os.path.join(self.modelPath, 'metabolites.txt')) as f:
f.readline()
for line in f:
a = line.strip().split('\t')
if int(a[3]):
mets[a[0]] = (a[1], int(a[2]))
else:
mets[self.initial+a[0]] = (a[1], int(a[2]))
with open(os.path.join(self.modelPath, 'enzymes.txt')) as f:
f.readline()
for line in f:
a = line.strip().split('\t')
mets[self.initial + a[0] + '_a'] = (a[1].upper() + '(free)', float(a[2]))
mets[self.initial + a[0] + '_b'] = (a[1].upper() + '(conj)', 0)
self.mets = mets
def __makeString(self, st):
sc = st.replace('"','')
r = sc.split(',')
rs = ''
c = 0
for i in r:
a = i.split(':')
if c==0:
rs += str('(' + a[1] + ') ')
else:
rs+=' + ' + '(' + a[1] + ') '
c+=1
if a[0].replace(' ','') in self.mets:
rs += self.mets[a[0].replace(' ','')][0]
else:
rs += self.mets[self.initial + a[0].replace(' ','')][0]
return rs
def __makeReacDict(self, st):
d= {}
sc = st.replace('"','')
r = sc.split(',')
for i,v in enumerate(r):
a = v.split(':')
met = a[0].replace(' ','')
if self.initial + met in self.mets:
met = self.initial + met
d[met] = float(a[1])
return d
def __getReacs(self):
reacs = {}
with open(os.path.join(self.modelPath, 'reactions.txt')) as f:
f.readline()
for line in f:
a = line.strip().split('\t')
#conjugate
reaction = a[0] + '_a'
reactants = a[1] + ',' + a[3] + '_a:1'
products = a[3] + '_b:1'
reacs[reaction] = {'reactants':self.__makeReacDict(reactants), 'products': self.__makeReacDict(products), 'string':self.__makeString(reactants) + ' => ' + self.__makeString(products), 'rate':float(a[4])}
#react
reaction = a[0] + '_b'
reactants = a[3] + '_b:1'
products = a[2] + ',' + a[3] + '_a:1'
reacs[reaction] = {'reactants':self.__makeReacDict(reactants), 'products': self.__makeReacDict(products), 'string':self.__makeString(reactants) + ' => ' + self.__makeString(products), 'rate':float(a[4])}
#rev conjugate
reaction = a[0] + '_c'
reactants = a[2] + ',' + a[3] + '_a:1'
products = a[3] + '_b:1'
reacs[reaction] = {'reactants':self.__makeReacDict(reactants), 'products': self.__makeReacDict(products), 'string':self.__makeString(reactants) + ' => ' + self.__makeString(products), 'rate':float(a[5])}
#rev react
reaction = a[0] + '_d'
reactants = a[3] + '_b:1'
products = a[1] + ',' + a[3] + '_a:1'
reacs[reaction] = {'reactants':self.__makeReacDict(reactants), 'products': self.__makeReacDict(products), 'string':self.__makeString(reactants) + ' => ' + self.__makeString(products), 'rate':float(a[5])}
self.reacs = reacs
#M = Model('C:/Users/u0139894/Documents/BH_EnergyMetabolism/files/gillespie/autotrophic', 'aut_')
# with open('C:/Users/u0139894/Documents/BH_EnergyMetabolism/files/gillespie/reactions.txt') as f:
# with open('C:/Users/u0139894/Documents/BH_EnergyMetabolism/files/gillespie/reactions2.txt', 'w') as f2:
# f2.write(f.readline())
# for line in f:
# a=line.strip().split('\t')
# f2.write(line.strip() + '\t' + M.reacs[a[0]] + '\n')
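# Hedged usage sketch (mirrors the commented-out call above): the folder and the
# 'aut_' prefix are examples; modelPath must contain metabolites.txt, enzymes.txt
# and reactions.txt in the tab-separated layout parsed by this class.
if __name__ == '__main__':
    M = Model('path/to/autotrophic', 'aut_')
    for name in list(M.reacs)[:3]:
        print(name, '->', M.reacs[name]['string'], '| rate =', M.reacs[name]['rate'])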
|
StarcoderdataPython
|
1759033
|
# -*- coding: utf-8 -*-
import time
from pykinect2 import PyKinectV2
from pykinect2.PyKinectV2 import *
from pykinect2 import PyKinectRuntime
from Kinetic import extractPoints
from numpy import *
#import pyttsx
k = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Body)
print "Kinect lance"
#e = pyttsx.init()
#e.say('Bonjour et bienvenu dans la prossaidure de calibration de la machine vivante. Une personne doit se mettre debout au centre de la saine, face public, les bras ecartai comme jaizu cri. et une autre personne est praite a tourner la Kinect selon l''axe Z. Tenez vous prai dans dix, neuf, huit, sept, six, cinq, quatre, trois, deux, un.')
#e.runAndWait()
calib = True
while calib :
time.sleep(0.1)
seeBody = False
if k.has_new_body_frame():
bs = k.get_last_body_frame()
tiltrad = arctan(bs.floor_clip_plane.z/bs.floor_clip_plane.y)
w = bs.floor_clip_plane.w
#print tiltrad*180.0/pi,w
if bs is not None:
for b in bs.bodies:
if not b.is_tracked:
continue
# get joints positions
js = b.joints
kpos = extractPoints(js,tiltrad,w,0.0,0.0)
if kpos["spine_base"][1]>0.05:
# e.say(u'tourner la kinect un peu a droite!')
# e.runAndWait()
print(u'tourner la kinect un peu a droite!')
elif kpos["spine_base"][1]<-0.05:
# e.say(u'tourner la kinect un peu a gauche!')
# e.runAndWait()
print(u'tourner la kinect un peu a gauche!')
else:
# e.say('c''est bon ne touchez plus la Kinect, tout est calibrai. Merci de votre devoumain')
# e.runAndWait()
print('c''est bon ne touchez plus la Kinect, tout est calibrai. Merci de votre devoumain')
print "rtip"
print kpos["r_tip"]
print "ltip"
print kpos["l_tip"]
print "spine"
print kpos["spine_base"]
print "tilt"
print tiltrad*180.0/pi
print "hkinect"
print w
print "dkinect"
print -kpos["spine_base"][0]
print "pan"
print arctan((kpos["r_tip"][0]-kpos["l_tip"][0])/(kpos["r_tip"][1]-kpos["l_tip"][1]))*180.0/pi
calib = False
|
StarcoderdataPython
|
43508
|
<reponame>ekkipermana/robotframework-test
"""
dexml: a dead-simple Object-XML mapper for Python
Let's face it: xml is a fact of modern life. I'd even go so far as to say
that it's *good* at what it does. But that doesn't mean it's easy to work
with and it doesn't mean that we have to like it. Most of the time, XML
just needs to get the hell out of the way and let you do some actual work
instead of writing code to traverse and manipulate yet another DOM.
The dexml module takes the obvious mapping between XML tags and Python objects
and lets you capture that as cleanly as possible. Loosely inspired by Django's
ORM, you write simple class definitions to define the expected structure of
your XML document. Like so:
>>> import dexml
>>> from dexml import fields
>>> class Person(dexml.Model):
... name = fields.String()
... age = fields.Integer(tagname='age')
Then you can parse an XML document into an object like this:
>>> p = Person.parse("<Person name='<NAME>'><age>42</age></Person>")
>>> p.name
u'<NAME>'
>>> p.age
42
And you can render an object into an XML document like this:
>>> p = Person(name="<NAME>",age=36)
>>> p.render()
'<?xml version="1.0" ?><Person name="<NAME>"><age>36</age></Person>'
Malformed documents will raise a ParseError:
>>> p = Person.parse("<Person><age>92</age></Person>")
Traceback (most recent call last):
...
ParseError: required field not found: 'name'
Of course, it gets more interesting when you nest Model definitions, like this:
>>> class Group(dexml.Model):
... name = fields.String(attrname="name")
... members = fields.List(Person)
...
>>> g = Group(name="Monty Python")
>>> g.members.append(Person(name="<NAME>",age=69))
>>> g.members.append(Person(name="<NAME>",age=67))
>>> g.render(fragment=True)
'<Group name="<NAME>"><Person name="<NAME>"><age>69</age></Person><Person name="<NAME>"><age>67</age></Person></Group>'
There's support for XML namespaces, default field values, case-insensitive
parsing, and more fun stuff. Check out the documentation on the following
classes for more details:
:Model: the base class for objects that map into XML
:Field: the base class for individual model fields
:Meta: meta-information about how to parse/render a model
"""
__ver_major__ = 0
__ver_minor__ = 3
__ver_patch__ = 7
__ver_sub__ = ""
__version__ = "%d.%d.%d%s" % (__ver_major__,__ver_minor__,__ver_patch__,__ver_sub__)
import copy
from xml.dom import minidom
## Local Imports
import fields
from _util import *
class Model(object):
"""Base class for dexml Model objects.
    Subclasses of Model represent a concrete type of object that can be parsed
from or rendered to an XML document. The mapping to/from XML is controlled
by two things:
* attributes declared on an inner class named 'meta'
* fields declared using instances of fields.Field
Here's a quick example:
class Person(dexml.Model):
# This overrides the default tagname of 'Person'
            class meta:
                tagname = "person"
            # This maps to a 'name' attribute on the <person> tag
name = fields.String()
# This maps to an <age> tag within the <person> tag
age = fields.Integer(tagname='age')
See the 'Meta' class in this module for available meta options, and the
'fields' submodule for available field types.
"""
__metaclass__ = ModelMetaclass
_fields = []
def __init__(self,**kwds):
"""Default Model constructor.
Keyword arguments that correspond to declared fields are processed
and assigned to that field.
"""
for f in self._fields:
val = kwds.get(f.field_name)
setattr(self,f.field_name,val)
@classmethod
def parse(cls,xml):
"""Produce an instance of this model from some xml.
The given xml can be a string, a readable file-like object, or
a DOM node; we might add support for more types in the future.
"""
self = cls()
node = self._make_xml_node(xml)
self.validate_xml_node(node)
# Keep track of fields that have successfully parsed something
fields_found = []
# Try to consume all the node's attributes
attrs = node.attributes.values()
for field in self._fields:
unused_attrs = field.parse_attributes(self,attrs)
if len(unused_attrs) < len(attrs):
fields_found.append(field)
attrs = unused_attrs
for attr in attrs:
self._handle_unparsed_node(attr)
# Try to consume all child nodes
if self.meta.order_sensitive:
self._parse_children_ordered(node,self._fields,fields_found)
else:
self._parse_children_unordered(node,self._fields,fields_found)
# Check that all required fields have been found
for field in self._fields:
if field.required and field not in fields_found:
err = "required field not found: '%s'" % (field.field_name,)
raise ParseError(err)
field.parse_done(self)
# All done, return the instance so created
return self
def _parse_children_ordered(self,node,fields,fields_found):
"""Parse the children of the given node using strict field ordering."""
cur_field_idx = 0
for child in node.childNodes:
idx = cur_field_idx
# If we successfully break out of this loop, one of our
# fields has consumed the node.
while idx < len(fields):
field = fields[idx]
res = field.parse_child_node(self,child)
if res is PARSE_DONE:
if field not in fields_found:
fields_found.append(field)
cur_field_idx = idx + 1
break
if res is PARSE_MORE:
if field not in fields_found:
fields_found.append(field)
cur_field_idx = idx
break
if res is PARSE_CHILDREN:
self._parse_children_ordered(child,[field],fields_found)
cur_field_idx = idx
break
idx += 1
else:
self._handle_unparsed_node(child)
def _parse_children_unordered(self,node,fields,fields_found):
"""Parse the children of the given node using loose field ordering."""
done_fields = {}
for child in node.childNodes:
idx = 0
# If we successfully break out of this loop, one of our
# fields has consumed the node.
while idx < len(fields):
if idx in done_fields:
idx += 1
continue
field = fields[idx]
res = field.parse_child_node(self,child)
if res is PARSE_DONE:
done_fields[idx] = True
if field not in fields_found:
fields_found.append(field)
break
if res is PARSE_MORE:
if field not in fields_found:
fields_found.append(field)
break
if res is PARSE_CHILDREN:
self._parse_children_unordered(child,[field],fields_found)
break
idx += 1
else:
self._handle_unparsed_node(child)
def _handle_unparsed_node(self,node):
if not self.meta.ignore_unknown_elements:
if node.nodeType == node.ELEMENT_NODE:
err = "unknown element: %s" % (node.nodeName,)
raise ParseError(err)
elif node.nodeType in (node.TEXT_NODE,node.CDATA_SECTION_NODE):
if node.nodeValue.strip():
err = "unparsed text node: %s" % (node.nodeValue,)
raise ParseError(err)
elif node.nodeType == node.ATTRIBUTE_NODE:
if not node.nodeName.startswith("xml"):
err = "unknown attribute: %s" % (node.name,)
raise ParseError(err)
def render(self,encoding=None,fragment=False,nsmap=None):
"""Produce XML from this model's instance data.
A unicode string will be returned if any of the objects contain
unicode values; specifying the 'encoding' argument forces generation
of an ASCII string.
By default a complete XML document is produced, including the
leading "<?xml>" declaration. To generate an XML fragment set
the 'fragment' argument to True.
The 'nsmap' argument maintains the current stack of namespace
prefixes used during rendering; it maps each prefix to a list of
namespaces, with the first item in the list being the current
namespace for that prefix. This argument should never be given
directly; it is for internal use by the rendering routines.
"""
if nsmap is None:
nsmap = {}
data = []
if not fragment:
if encoding:
s = '<?xml version="1.0" encoding="%s" ?>' % (encoding,)
data.append(s)
else:
data.append('<?xml version="1.0" ?>')
data.extend(self._render(nsmap))
xml = "".join(data)
if encoding:
xml = xml.encode(encoding)
return xml
def _render(self,nsmap):
"""Render this model as an XML fragment."""
# Determine opening and closing tags
pushed_ns = False
if self.meta.namespace:
namespace = self.meta.namespace
prefix = self.meta.namespace_prefix
try:
cur_ns = nsmap[prefix]
except KeyError:
cur_ns = []
nsmap[prefix] = cur_ns
if prefix:
tagname = "%s:%s" % (prefix,self.meta.tagname)
open_tag_contents = [tagname]
if not cur_ns or cur_ns[0] != namespace:
cur_ns.insert(0,namespace)
pushed_ns = True
open_tag_contents.append('xmlns:%s="%s"'%(prefix,namespace))
close_tag_contents = tagname
else:
open_tag_contents = [self.meta.tagname]
if not cur_ns or cur_ns[0] != namespace:
cur_ns.insert(0,namespace)
pushed_ns = True
open_tag_contents.append('xmlns="%s"'%(namespace,))
close_tag_contents = self.meta.tagname
else:
open_tag_contents = [self.meta.tagname]
close_tag_contents = self.meta.tagname
# Find the attributes and child nodes
attrs = []
children = []
num = 0
for f in self._fields:
val = getattr(self,f.field_name)
attrs.extend(f.render_attributes(self,val,nsmap))
children.extend(f.render_children(self,val,nsmap))
            if len(attrs) + len(children) == num and f.required:
                raise RenderError("Field '%s' is missing" % (f.field_name,))
            num = len(attrs) + len(children)
# Actually construct the XML
if pushed_ns:
nsmap[prefix].pop(0)
open_tag_contents.extend(attrs)
if children:
yield "<%s>" % (" ".join(open_tag_contents),)
for chld in children:
yield chld
yield "</%s>" % (close_tag_contents,)
else:
yield "<%s />" % (" ".join(open_tag_contents),)
@staticmethod
def _make_xml_node(xml):
"""Transform a variety of input formats to an XML DOM node."""
try:
ntype = xml.nodeType
except AttributeError:
if isinstance(xml,basestring):
try:
xml = minidom.parseString(xml)
except Exception, e:
raise XmlError(e)
elif hasattr(xml,"read"):
try:
xml = minidom.parse(xml)
except Exception, e:
raise XmlError(e)
else:
raise ValueError("Can't convert that to an XML DOM node")
node = xml.documentElement
else:
if ntype == xml.DOCUMENT_NODE:
node = xml.documentElement
else:
node = xml
return node
@classmethod
def validate_xml_node(cls,node):
"""Check that the given xml node is valid for this object.
Here 'valid' means that it is the right tag, in the right
namespace. We might add more eventually...
"""
if node.nodeType != node.ELEMENT_NODE:
err = "Class '%s' got a non-element node"
err = err % (cls.__name__,)
raise ParseError(err)
equals = (lambda a, b: a == b) if cls.meta.case_sensitive else (lambda a, b: a.lower() == b.lower())
if not equals(node.localName, cls.meta.tagname):
err = "Class '%s' got tag '%s' (expected '%s')"
err = err % (cls.__name__,node.localName,
cls.meta.tagname)
raise ParseError(err)
if cls.meta.namespace:
if node.namespaceURI != cls.meta.namespace:
err = "Class '%s' got namespace '%s' (expected '%s')"
err = err % (cls.__name__,node.namespaceURI,
cls.meta.namespace)
raise ParseError(err)
else:
if node.namespaceURI:
err = "Class '%s' got namespace '%s' (expected no namespace)"
err = err % (cls.__name__,node.namespaceURI,)
raise ParseError(err)
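# Hedged usage sketch (not part of dexml itself): the namespace support that the
# module docstring mentions, using an invented namespace URI and prefix.
if __name__ == "__main__":
    class NSPerson(Model):
        class meta:
            tagname = "person"
            namespace = "http://example.com/people"
            namespace_prefix = "p"
        name = fields.String()
    p = NSPerson.parse('<p:person xmlns:p="http://example.com/people" name="Ann" />')
    print p.name                   # -> Ann
    print p.render(fragment=True)  # -> <p:person xmlns:p="http://example.com/people" name="Ann" />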
|
StarcoderdataPython
|
1950756
|
<filename>makeCourse/plastex/appendixnumberbeamer/__init__.py
from plasTeX.PackageResource import (PackageResource, PackageCss, PackageJs, PackageTemplateDir)
from plasTeX import Command, Environment, sourceArguments
def ProcessOptions(options, document):
tpl = PackageTemplateDir(renderers='html5',package='appendixnumberbeamer')
document.addPackageResource([tpl])
class appendix(Command):
args = ''
|
StarcoderdataPython
|
3463498
|
import numpy as np
import matplotlib.pyplot as plt
def generate_linerp_plot(linerp_vals_train, linerp_vals_test, title:str=''):
xs_train = np.linspace(0, 1, len(linerp_vals_train))
xs_test = np.linspace(0, 1, len(linerp_vals_test))
fig = plt.figure(figsize=(8, 5))
if title != '': plt.title(title)
plt.plot(xs_train, linerp_vals_train, label='Train')
plt.plot(xs_test, linerp_vals_test, label='Test')
plt.legend()
plt.xlabel('alpha')
plt.grid()
return fig
def generate_acts_entropy_linerp_plot(linerp_values):
linerp_values = np.array(linerp_values)
xs = np.linspace(0, 1, linerp_values.shape[1])
colors = plt.cm.jet(np.linspace(0, 1, linerp_values.shape[0]))
fig = plt.figure(figsize=(12, 7))
for i, layer_entropies in enumerate(linerp_values):
plt.plot(xs, layer_entropies, label='Layer #%d' % i, color=colors[i])
plt.legend()
plt.xlabel('alpha')
plt.grid()
return fig
def generate_weights_entropy_linerp_plot(values):
xs = np.linspace(0, 1, len(values))
fig = plt.figure(figsize=(7, 5))
plt.plot(xs, values)
plt.xlabel('alpha')
plt.grid()
return fig
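# Hedged demo (not part of the original module): exercise the three helpers with
# random curves; the sizes (50 alpha steps, 4 layers) are made up.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    generate_linerp_plot(rng.rand(50), rng.rand(50), title='Loss along interpolation path')
    generate_acts_entropy_linerp_plot(rng.rand(4, 50))
    generate_weights_entropy_linerp_plot(rng.rand(50))
    plt.show()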
|
StarcoderdataPython
|
280797
|
import os
import struct
import re
import logging
NFD_LABEL = "feature.node.kubernetes.io/cpu-power.sst_bf.enabled"
def get_cpu_count():
dirs = os.listdir("/sys/devices/system/cpu")
return len([c for c in dirs if re.search(r"^cpu[0-9]+$", c)])
def read_msr(msr, cpu=0):
try:
with open("/dev/cpu/{}/msr".format(cpu), "rb") as f:
f.seek(msr)
raw = f.read(8)
val = struct.unpack("BBBBBBBB", raw)
return val
except IOError as err:
logging.error("Could not read MSR: {}".format(err))
raise err
def get_cpu_base_frequency():
b = read_msr(0xCE) # MSR_PLATFORM_INFO
    # Byte 1 contains the max non-turbo frequency
base_freq = b[1] * 100
return base_freq
def cpus():
cpus = []
try:
p1 = get_cpu_base_frequency()
except Exception as err:
logging.error("Could not read base freq from MSR: {}".format(err))
try:
p1 = get_cpu_base_frequency_no_msr()
except Exception as err:
logging.error("Could not read base freq from sys fs: {}"
.format(err))
return cpus
for c in range(0, get_cpu_count()):
try:
base = read_cpu_base_freq(c)
if base > p1:
cpus.append(c)
except IOError:
logging.warning(
"Could not read base frequency of CPU {}, skipping".format(c))
return cpus
# reads base frequencies for each core and returns the lowest value
def get_cpu_base_frequency_no_msr():
freqs = []
for c in range(0, get_cpu_count()):
try:
freqs.append(read_cpu_base_freq(c))
except IOError:
logging.warning(
"Could not read base frequency of CPU {}, skipping".format(c))
return min(freqs)
def read_cpu_base_freq(cpu):
base_freq = 0
base_freq_template = "/sys/devices/system/cpu/cpu{}/cpufreq/base_frequency"
base_file_path = base_freq_template.format(cpu)
with open(base_file_path, "r") as f:
# base_frequency reports cores frequency in kHz
base_freq = int(f.readline().rstrip()) / 1000
return base_freq
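# Hedged example (not part of the original module): report the SST-BF
# high-priority cores found by cpus(). Reading MSRs requires root, and whether
# NFD_LABEL is emitted exactly like this elsewhere in the project is an assumption.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    high_priority = cpus()
    print("{}={}".format(NFD_LABEL, "true" if high_priority else "false"))
    print("High-priority (SST-BF) cores: {}".format(high_priority))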
|
StarcoderdataPython
|
8073452
|
<gh_stars>0
import pkg_resources
from .core import RedisChannelLayer
from .local import RedisLocalChannelLayer
__version__ = pkg_resources.require('asgi_redis')[0].version
|
StarcoderdataPython
|
4803930
|
<reponame>leylop/correlation_viewer
__author__ = 'Diego'
import os
import PyQt4.uic
if __name__ == '__main__':
this_dir=os.path.dirname(__file__)
qt_gui_dir=os.path.join(this_dir,'gui')
PyQt4.uic.compileUiDir(qt_gui_dir)
|
StarcoderdataPython
|
12838132
|
<reponame>ldimaggi/acceptance-testing<gh_stars>0
#
# Copyright The Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import common
from Kind import kind_auth_wrap
needs_cluster = False
class Sh(common.CommandRunner):
def require_cluster(self, require):
global needs_cluster
if require == "True" or require == "true":
needs_cluster = True
else:
needs_cluster = False
def wrap(self, cmd):
global needs_cluster
if needs_cluster == True:
return kind_auth_wrap(cmd)
return cmd
def Run(self, cmd):
self.run_command(self.wrap(cmd))
def should_pass(self, cmd):
self.Run(cmd)
self.return_code_should_be(0)
def should_fail(self, cmd):
self.Run(cmd)
self.return_code_should_not_be(0)
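# Hedged sketch (not part of the original library): the same keywords driven from
# plain Python instead of Robot Framework; assumes common.CommandRunner can be
# instantiated directly and that helm (and kind, if a cluster is required) is on PATH.
if __name__ == "__main__":
    sh = Sh()
    sh.require_cluster("false")
    sh.should_pass("helm version")
    sh.should_fail("helm bogus-subcommand")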
|
StarcoderdataPython
|
1900266
|
class TelegramData:
def __init__(self, autoconv, users):
self.autoconv = autoconv
self.update = None
self.context = None
self.telegram_id = None
self.udata = None
self.sdata = None
self.message = None
self.chat_type = None
self.exception = None
self.users = users
def __str__(self):
return (
f"UPDATE {self.update}\n"
f"CONTEXT {self.context}\n"
f"TELEGRAM ID {self.telegram_id}\n"
f"USER DATA {self.udata}\n"
f"STATES DATA {self.sdata}\n"
f"AUTH USERS {self.users}"
)
# ---- Update functions
def update_telegram_data(self, update, context):
"""Simple info update in TelegramData"""
self.update = update
self.context = context
def prepare(self):
"""Preparing all the update info for dynamic stuff in handler"""
self.message = self.update.message or self.update.callback_query.message
self.telegram_id = self.update.effective_user.id
self.chat_type = self.message.chat.type
self.udata = self.context.user_data
self.sdata = self.udata.get("data") if self.udata else None
self.message = self.update.message
return self
# ---- Public function
def save(self, *args, **kwargs):
"""Save key:value in user_data space"""
self.context.user_data.update(*args, **kwargs)
self.prepare()
def add(self, key, value):
"""Add some value to a already stored key in user_data"""
prev_value = self.udata.get(key)
self.context.user_data.update({key: prev_value + value})
self.prepare()
return prev_value + value
def get_or_set(self, key, set_value):
"""Get or set a value associated to a key in user_data"""
if key not in self.udata:
self.save({key: set_value})
return self.udata.get(key)
def delete(self, key, pop_value=None):
"""Delete a key in user_data"""
value = self.context.user_data.pop(key, pop_value)
self.prepare()
return value
|
StarcoderdataPython
|
300326
|
<reponame>yunzhang599/Python3_Package_Examples<gh_stars>1-10
# full pymongo documentation
# http://api.mongodb.org/python/current/
import pymongo
client = pymongo.MongoClient("localhost", 27017)
db = client.test
print(db.name)
print(db.my_collection)
db.my_collection.save({"x": 10})
db.my_collection.save({"x": 8})
db.my_collection.save({"x": 11})
db.my_collection.find_one()
for item in db.my_collection.find():
    print(item["x"])
db.my_collection.create_index("x")
for item in db.my_collection.find().sort("x", pymongo.ASCENDING):
    print(item["x"])
print([item["x"] for item in db.my_collection.find().limit(2).skip(1)])
|
StarcoderdataPython
|
1826548
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2018 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
import os
import re
import socket
import threading
import time
class DNSQuery(object):
"""
Used for making fake DNS resolution responses based on received
raw request
Reference(s):
http://code.activestate.com/recipes/491264-mini-fake-dns-server/
https://code.google.com/p/marlon-tools/source/browse/tools/dnsproxy/dnsproxy.py
"""
def __init__(self, raw):
self._raw = raw
self._query = ""
type_ = (ord(raw[2]) >> 3) & 15 # Opcode bits
if type_ == 0: # Standard query
i = 12
j = ord(raw[i])
while j != 0:
self._query += raw[i + 1:i + j + 1] + '.'
i = i + j + 1
j = ord(raw[i])
def response(self, resolution):
"""
Crafts raw DNS resolution response packet
"""
retVal = ""
if self._query:
retVal += self._raw[:2] # Transaction ID
retVal += "\x85\x80" # Flags (Standard query response, No error)
retVal += self._raw[4:6] + self._raw[4:6] + "\x00\x00\x00\x00" # Questions and Answers Counts
retVal += self._raw[12:(12 + self._raw[12:].find("\x00") + 5)] # Original Domain Name Query
retVal += "\xc0\x0c" # Pointer to domain name
retVal += "\x00\x01" # Type A
retVal += "\x00\x01" # Class IN
retVal += "\x00\x00\x00\x20" # TTL (32 seconds)
retVal += "\x00\x04" # Data length
retVal += "".join(chr(int(_)) for _ in resolution.split('.')) # 4 bytes of IP
return retVal
class DNSServer(object):
def __init__(self):
self._check_localhost()
self._requests = []
self._lock = threading.Lock()
try:
self._socket = socket._orig_socket(socket.AF_INET, socket.SOCK_DGRAM)
except AttributeError:
self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._socket.bind(("", 53))
self._running = False
self._initialized = False
def _check_localhost(self):
response = ""
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("", 53))
s.send("6509012000010000000000010377777706676f6f676c6503636f6d00000100010000291000000000000000".decode("hex")) # A www.google.com
response = s.recv(512)
except:
pass
finally:
if response and "google" in response:
raise socket.error("another DNS service already running on *:53")
def pop(self, prefix=None, suffix=None):
"""
Returns received DNS resolution request (if any) that has given
prefix/suffix combination (e.g. prefix.<query result>.suffix.domain)
"""
retVal = None
with self._lock:
for _ in self._requests:
if prefix is None and suffix is None or re.search(r"%s\..+\.%s" % (prefix, suffix), _, re.I):
retVal = _
self._requests.remove(_)
break
return retVal
def run(self):
"""
Runs a DNSServer instance as a daemon thread (killed by program exit)
"""
def _():
try:
self._running = True
self._initialized = True
while True:
data, addr = self._socket.recvfrom(1024)
_ = DNSQuery(data)
self._socket.sendto(_.response("127.0.0.1"), addr)
with self._lock:
self._requests.append(_._query)
except KeyboardInterrupt:
raise
finally:
self._running = False
thread = threading.Thread(target=_)
thread.daemon = True
thread.start()
if __name__ == "__main__":
server = None
try:
server = DNSServer()
server.run()
while not server._initialized:
time.sleep(0.1)
while server._running:
while True:
_ = server.pop()
if _ is None:
break
else:
print "[i] %s" % _
time.sleep(1)
except socket.error, ex:
if 'Permission' in str(ex):
print "[x] Please run with sudo/Administrator privileges"
else:
raise
except KeyboardInterrupt:
os._exit(0)
finally:
if server:
server._running = False
|
StarcoderdataPython
|
1933206
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import tempfile
from metrics import loading
from telemetry.core.platform.profiler import perf_profiler
from telemetry.page import page_measurement
from telemetry.value import scalar
class LoadingProfile(page_measurement.PageMeasurement):
options = {'page_repeat': 2}
def __init__(self):
super(LoadingProfile, self).__init__(discard_first_result=True)
@property
def results_are_the_same_on_every_page(self):
return False
def CustomizeBrowserOptions(self, options):
if not perf_profiler.PerfProfiler.is_supported(browser_type='any'):
raise Exception('This measurement is not supported on this platform')
perf_profiler.PerfProfiler.CustomizeBrowserOptions(
browser_type='any', options=options)
def WillNavigateToPage(self, page, tab):
tab.browser.StartProfiling(perf_profiler.PerfProfiler.name(),
os.path.join(tempfile.mkdtemp(),
page.file_safe_name))
def MeasurePage(self, page, tab, results):
# In current telemetry tests, all tests wait for DocumentComplete state,
# but we need to wait for the load event.
tab.WaitForJavaScriptExpression('performance.timing.loadEventStart', 300)
profile_files = tab.browser.StopProfiling()
loading.LoadingMetric().AddResults(tab, results)
profile_file = None
for profile_file in profile_files:
if 'renderer' in profile_file:
break
for function, period in perf_profiler.PerfProfiler.GetTopSamples(
profile_file, 10).iteritems():
results.AddValue(scalar.ScalarValue(
results.current_page, function.replace('.', '_'), 'period', period))
|
StarcoderdataPython
|
6446241
|
<gh_stars>1-10
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server import util
class BeaconKnowledgeMapPredicate(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, edge_label: str=None, relation: str=None, negated: bool=None): # noqa: E501
"""BeaconKnowledgeMapPredicate - a model defined in Swagger
:param edge_label: The edge_label of this BeaconKnowledgeMapPredicate. # noqa: E501
:type edge_label: str
:param relation: The relation of this BeaconKnowledgeMapPredicate. # noqa: E501
:type relation: str
:param negated: The negated of this BeaconKnowledgeMapPredicate. # noqa: E501
:type negated: bool
"""
self.swagger_types = {
'edge_label': str,
'relation': str,
'negated': bool
}
self.attribute_map = {
'edge_label': 'edge_label',
'relation': 'relation',
'negated': 'negated'
}
self._edge_label = edge_label
self._relation = relation
self._negated = negated
@classmethod
def from_dict(cls, dikt) -> 'BeaconKnowledgeMapPredicate':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The BeaconKnowledgeMapPredicate of this BeaconKnowledgeMapPredicate. # noqa: E501
:rtype: BeaconKnowledgeMapPredicate
"""
return util.deserialize_model(dikt, cls)
@property
def edge_label(self) -> str:
"""Gets the edge_label of this BeaconKnowledgeMapPredicate.
Human readable name of the 'minimal' standard Biolink Model predicate relationship name. See [Biolink Model](https://biolink.github.io/biolink-model) for the full list of terms. # noqa: E501
:return: The edge_label of this BeaconKnowledgeMapPredicate.
:rtype: str
"""
return self._edge_label
@edge_label.setter
def edge_label(self, edge_label: str):
"""Sets the edge_label of this BeaconKnowledgeMapPredicate.
Human readable name of the 'minimal' standard Biolink Model predicate relationship name. See [Biolink Model](https://biolink.github.io/biolink-model) for the full list of terms. # noqa: E501
:param edge_label: The edge_label of this BeaconKnowledgeMapPredicate.
:type edge_label: str
"""
self._edge_label = edge_label
@property
def relation(self) -> str:
"""Gets the relation of this BeaconKnowledgeMapPredicate.
Human readable name of a 'maximal' Biolink Model or beacon-specific (or Reasoner-specific) predicate relationship name. # noqa: E501
:return: The relation of this BeaconKnowledgeMapPredicate.
:rtype: str
"""
return self._relation
@relation.setter
def relation(self, relation: str):
"""Sets the relation of this BeaconKnowledgeMapPredicate.
Human readable name of a 'maximal' Biolink Model or beacon-specific (or Reasoner-specific) predicate relationship name. # noqa: E501
:param relation: The relation of this BeaconKnowledgeMapPredicate.
:type relation: str
"""
self._relation = relation
@property
def negated(self) -> bool:
"""Gets the negated of this BeaconKnowledgeMapPredicate.
:return: The negated of this BeaconKnowledgeMapPredicate.
:rtype: bool
"""
return self._negated
@negated.setter
def negated(self, negated: bool):
"""Sets the negated of this BeaconKnowledgeMapPredicate.
:param negated: The negated of this BeaconKnowledgeMapPredicate.
:type negated: bool
"""
self._negated = negated
|
StarcoderdataPython
|
1988848
|
from setuptools import setup
import os.path
import sys
setup(
name='pysetupdi',
version='2018.10.22',
packages=['pysetupdi'],
url='https://github.com/gwangyi/pysetupdi',
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
description='Python SetupAPI wrapper',
platforms=['win32'],
entry_points={
'console_scripts': ['pysetupdi=pysetupdi.__main__:main']
}
)
|
StarcoderdataPython
|